diff --git a/.coderabbit.yaml b/.coderabbit.yaml
index 08e1985ae7..bdde403b77 100644
--- a/.coderabbit.yaml
+++ b/.coderabbit.yaml
@@ -13,6 +13,7 @@ reviews:
drafts: false
base_branches:
- develop
+ - develop-postgres
- main
chat:
auto_reply: true
diff --git a/.eslintrc.json b/.eslintrc.json
index 253591edde..b7af4acadc 100644
--- a/.eslintrc.json
+++ b/.eslintrc.json
@@ -170,6 +170,11 @@
"package-lock.json",
"tsconfig.json",
"docs/**",
- "examples/**"
+ "examples/**",
+ "docs/docusaurus.config.ts",
+ "docs/sidebars.ts",
+ "docs/src/**",
+ "docs/blog/**",
+ "pyproject.toml"
]
}
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000000..a6046cda39
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,3 @@
+[flake8]
+ignore = E402,E722,E203,F401,W503
+max-line-length = 80
diff --git a/.github/workflows/css_check.py b/.github/workflows/css_check.py
deleted file mode 100644
index 4c6aef06d2..0000000000
--- a/.github/workflows/css_check.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: UTF-8 -*-
-"""Check TypeScript files for CSS violations and embedded CSS."""
-
-import argparse
-import os
-import re
-import sys
-
-
-def check_embedded_css(content: str) -> list:
- """
- Check for embedded CSS in the content.
-
- Args:
- content: The content of the file to check.
-
- Returns:
- A list of embedded CSS violations found.
- """
- embedded_css_pattern = r"#([0-9a-fA-F]{3}){1,2}" # Matches CSS color codes
- return re.findall(embedded_css_pattern, content)
-
-
-def check_files(
- directory: str, exclude_files: list, exclude_directories: list, allowed_css_patterns: list
-) -> tuple:
- """
- Check TypeScript files for CSS violations and print correct CSS imports.
-
- Args:
- directory: The directory to check.
- exclude_files: List of files to exclude from analysis.
- exclude_directories: List of directories to exclude from analysis.
- allowed_css_patterns: List of allowed CSS file patterns.
-
- Returns:
- A tuple containing lists of violations, correct CSS imports, and embedded CSS violations.
- """
- violations = []
- correct_css_imports = []
- embedded_css_violations = []
-
- # Normalize exclude paths
- exclude_files = set(os.path.abspath(file) for file in exclude_files)
- exclude_directories = set(os.path.abspath(dir) for dir in exclude_directories)
-
- for root, _, files in os.walk(directory):
- # Skip excluded directories
- if any(root.startswith(exclude_dir) for exclude_dir in exclude_directories):
- continue
-
- for file in files:
- file_path = os.path.abspath(os.path.join(root, file))
-
- # Skip excluded files
- if file_path in exclude_files:
- continue
-
- # Process TypeScript files
- if file.endswith((".ts", ".tsx")) and "test" not in root:
- try:
- with open(file_path, "r", encoding="utf-8") as f:
- content = f.read()
- except (IOError, UnicodeDecodeError) as e:
- print(f"Error reading file {file_path}: {e}")
- continue
-
- # Check for CSS imports with an improved regex pattern
- css_imports = re.findall(
- r'import\s+.*?["\'](.*?\.css)["\'];', content
- )
- for css_file in css_imports:
- # Check if the CSS import matches the allowed patterns
- if any(css_file.endswith(pattern) for pattern in allowed_css_patterns):
- correct_css_imports.append(
- f"Correct CSS import ({css_file}) in {file_path}"
- )
- else:
- violations.append(
- f"Invalid CSS import ({css_file}) in {file_path}"
- )
-
- # Check for embedded CSS
- embedded_css = check_embedded_css(content)
- if embedded_css:
- embedded_css_violations.append(
- f"Embedded CSS found in {file_path}: {', '.join(embedded_css)}"
- )
-
- return violations, correct_css_imports, embedded_css_violations
-
-
-def main():
- """Run the CSS check script."""
- parser = argparse.ArgumentParser(
- description="Check for CSS violations in TypeScript files."
- )
- parser.add_argument("--directory", required=True, help="Directory to check.")
- parser.add_argument(
- "--exclude_files",
- nargs="*",
- default=[],
- help="Specific files to exclude from analysis.",
- )
- parser.add_argument(
- "--exclude_directories",
- nargs="*",
- default=[],
- help="Directories to exclude from analysis.",
- )
- parser.add_argument(
- "--allowed_css_patterns",
- nargs="*",
- default=["app.module.css"],
- help="Allowed CSS file patterns.",
- )
- args = parser.parse_args()
-
- violations, correct_css_imports, embedded_css_violations = check_files(
- directory=args.directory,
- exclude_files=args.exclude_files,
- exclude_directories=args.exclude_directories,
- allowed_css_patterns=args.allowed_css_patterns,
- )
-
- if violations:
- print("\nCSS Import Violations:")
- print("\n".join(violations))
-
- if embedded_css_violations:
- print("\nEmbedded CSS Violations:")
- print("\n".join(embedded_css_violations))
-
- if correct_css_imports:
- print("\nCorrect CSS Imports:")
- print("\n".join(correct_css_imports))
- else:
- print("\nNo correct CSS imports found.")
-
- if violations or embedded_css_violations:
- sys.exit(1) # Exit with error code if violations found
- else:
- print("\nNo CSS violations found.")
- sys.exit(0) # Exit with success code
-
-
-if __name__ == "__main__":
- main()
-
diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml
index 20c283c797..a6f0dc0b5f 100644
--- a/.github/workflows/pull-request.yml
+++ b/.github/workflows/pull-request.yml
@@ -40,19 +40,6 @@ jobs:
chmod +x ./.github/workflows/scripts/countline.py
./.github/workflows/scripts/countline.py --lines 600 --exclude_files src/screens/LoginPage/LoginPage.tsx src/GraphQl/Queries/Queries.ts src/screens/OrgList/OrgList.tsx src/GraphQl/Mutations/mutations.ts src/components/EventListCard/EventListCardModals.tsx src/components/TagActions/TagActionsMocks.ts src/utils/interfaces.ts src/screens/MemberDetail/MemberDetail.tsx
- # Run the CSS import check script
- - name: Check for CSS violations and print correct imports
- run: |
- if [ ! -f ./.github/workflows/css_check.py ]; then
- echo "Error: CSS check script not found"
- exit 1
- fi
- chmod +x ./.github/workflows/css_check.py
- ./.github/workflows/css_check.py --directory . || {
- echo "Error: CSS check failed"
- exit 1
- }
-
- name: Get changed TypeScript files
id: changed-files
uses: tj-actions/changed-files@v45
@@ -109,6 +96,9 @@ jobs:
uses: tj-actions/changed-files@v45
with:
files: |
+ .flake8
+ .pydocstyle
+ pyproject.toml
.env*
vitest.config.js
src/App.tsx
@@ -255,27 +245,50 @@ jobs:
uses: tj-actions/changed-files@v45
- name: Run Jest Tests
- if: steps.changed-files.outputs.only_changed != 'true'
+ if: steps.changed-files.outputs.any_changed == 'true'
env:
NODE_V8_COVERAGE: './coverage/jest'
run: |
npm run test -- --watchAll=false --coverage
+ - name: Upload Jest Coverage to Codecov
+ if: steps.changed-files.outputs.any_changed == 'true'
+ uses: codecov/codecov-action@v4
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ files: ./coverage/jest/lcov.info
+ flags: jest
+ fail_ci_if_error: true
+
- name: Run Vitest Tests
- if: steps.changed-files.outputs.only_changed != 'true'
+ if: steps.changed-files.outputs.any_changed == 'true'
env:
NODE_V8_COVERAGE: './coverage/vitest'
run: |
npm run test:vitest:coverage
- - name: Merge Coverage Reports
- if: steps.changed-files.outputs.only_changed != 'true'
+ - name: Upload Vitest Coverage to Codecov
+ if: steps.changed-files.outputs.any_changed == 'true'
+ uses: codecov/codecov-action@v4
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ files: ./coverage/vitest/lcov.info
+ flags: vitest
+ fail_ci_if_error: true
+
+ - name: Merge Jest and Vitest Coverage Reports
run: |
- mkdir -p coverage
- if ! npx lcov-result-merger 'coverage/*/lcov.info' > 'coverage/lcov.info'; then
- echo "Failed to merge coverage reports"
- exit 1
- fi
+ mkdir -p ./coverage
+ npx lcov-result-merger './coverage/*/lcov.info' './coverage/lcov.info'
+
+ - name: Upload Combined Coverage to Codecov
+ uses: codecov/codecov-action@v4
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
+ gcov_ignore: 'docs/'
+ files: ./coverage/lcov.info
+ flags: combined
+ fail_ci_if_error: true
- name: TypeScript compilation for changed files
run: |
@@ -285,16 +298,6 @@ jobs:
fi
done
- - name: Present and Upload coverage to Codecov as ${{env.CODECOV_UNIQUE_NAME}}
- uses: codecov/codecov-action@v4
- with:
- token: ${{ secrets.CODECOV_TOKEN }}
- verbose: true
- gcov_ignore: 'docs/'
- fail_ci_if_error: false
- files: './coverage/lcov.info'
- name: '${{env.CODECOV_UNIQUE_NAME}}'
-
- name: Test acceptable level of code coverage
uses: VeryGoodOpenSource/very_good_coverage@v3
with:
@@ -462,3 +465,54 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
GITHUB_REPOSITORY: ${{ github.repository }}
+
+
+ Python-Compliance:
+ name: Check Python Code Style
+ runs-on: ubuntu-latest
+ needs: [Code-Quality-Checks]
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v4
+ with:
+ python-version: 3.11
+
+ - name: Cache pip packages
+ uses: actions/cache@v4
+ with:
+ path: ~/.cache/pip
+ key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
+ restore-keys: |
+ ${{ runner.os }}-pip-
+
+ - name: Install dependencies
+ run: |
+ python3 -m venv venv
+ source venv/bin/activate
+ python -m pip install --upgrade pip
+ pip install -r .github/workflows/requirements.txt
+
+ - name: Run Black Formatter Check
+ run: |
+ source venv/bin/activate
+ black --check .
+
+ - name: Run Flake8 Linter
+ run: |
+ source venv/bin/activate
+ flake8 --docstring-convention google --ignore E402,E722,E203,F401,W503 .github
+
+ - name: Run pydocstyle
+ run: |
+ source venv/bin/activate
+ pydocstyle --convention=google --add-ignore=D415,D205 .github
+
+ - name: Run docstring compliance check
+ run: |
+ source venv/bin/activate
+ python .github/workflows/scripts/check_docstrings.py --directories .github
diff --git a/.github/workflows/push-deploy-website.yml b/.github/workflows/push-deploy-website.yml
index e03dab9840..5af7c90094 100644
--- a/.github/workflows/push-deploy-website.yml
+++ b/.github/workflows/push-deploy-website.yml
@@ -24,7 +24,7 @@ jobs:
name: Deploy https://docs-admin.talawa.io website
runs-on: ubuntu-latest
# Run only if the develop-postgres branch and not dependabot
- if: ${{ github.actor != 'dependabot[bot]' }}
+ if: ${{ github.actor != 'dependabot[bot]' && github.event.pull_request.base.ref == 'develop-postgres' }}
environment:
# This "name" has to be the repos' branch that contains
# the current active website. There must be an entry for
diff --git a/.github/workflows/requirements.txt b/.github/workflows/requirements.txt
new file mode 100644
index 0000000000..d27230d569
--- /dev/null
+++ b/.github/workflows/requirements.txt
@@ -0,0 +1,14 @@
+#############################################################################
+# DO NOT DELETE
+#############################################################################
+#
+# Required for GitHub Action workflow python checks
+#
+#############################################################################
+#############################################################################
+
+black
+pydocstyle
+flake8
+flake8-docstrings
+docstring_parser
diff --git a/.github/workflows/scripts/check_docstrings.py b/.github/workflows/scripts/check_docstrings.py
new file mode 100755
index 0000000000..814729765c
--- /dev/null
+++ b/.github/workflows/scripts/check_docstrings.py
@@ -0,0 +1,773 @@
+#!/usr/bin/env python3
+"""Script to check for docstrings."""
+
+import os
+import re
+import sys
+import argparse
+from collections import namedtuple
+from docstring_parser import parse
+
+Violation = namedtuple("Violation", "line function issue action")
+
+
+def validate_docstring(file_path):
+ """Validate docstrings in a file for compliance with the Google style guide.
+
+ Args:
+ file_path (str): Path to the Python file to validate.
+
+ Returns:
+ list: List of violations found in the file, with details about
+ the issue and corrective action.
+
+ """
+ # Initialize key variables
+ violations = []
+
+ # Read the file for processing
+ try:
+ with open(file_path, "r", encoding="utf-8") as fh_:
+ lines_with_hard_returns = fh_.readlines()
+
+ except Exception:
+ return violations
+
+ # Remove hard returns at the end of each line read
+ lines = [_.rstrip() for _ in lines_with_hard_returns]
+
+ # Evaluate each line
+ for line_number, line in enumerate(lines):
+
+ # Identify sections of the file that are functions or methods
+ if re.match(r"^\s*def ", line):
+ # Get the function name and its arguments
+ function = extract_function_arguments(line_number, lines)
+
+ # Ignore test functions in test files
+ if ignore_function(function, file_path):
+ continue
+
+ # Skip if there are python decorator exceptions
+ decorator = function_has_decorator(line_number, lines)
+ if bool(decorator):
+ if decorator_in_docstring_exception_list(decorator):
+ continue
+
+ # Get the docstring
+ docstring = extract_docstring(function.name, line_number, lines)
+ if bool(docstring.violations):
+ # Add the violation to the list
+ violations.extend(docstring.violations)
+
+ # Evaluate the relationship between the
+ # declared variables and the docstring
+ if bool(docstring.fatal) is False:
+ bad = match_arguments_to_docstring(
+ function, docstring, line_number
+ )
+ if bool(bad):
+ violations.extend(bad)
+
+ # Return
+ return violations
+
+
+def ignore_function(function, file_path):
+    """Determine whether a function should be ignored during validation.
+
+ Args:
+ function: Function object
+ file_path: Path to file under test
+
+ Returns:
+ result: True if function must be ignored
+
+ """
+ # Initialize key variables
+ result = False
+ ignores = ["test_", "tearDownClass", "setUpClass", "setUp", "tearDown"]
+
+ # Ignore test functions in test files
+ for ignore in ignores:
+ if function.name.startswith(ignore) and ("test_" in file_path):
+ result = True
+
+ # Return
+ return result
+
+
+def match_arguments_to_docstring(function, docstring, line_number):
+    """Compare a function's arguments against those listed in its docstring.
+
+ Args:
+ function: Function object
+ docstring: Docstring object
+ line_number: Number on which the function resides
+
+ Returns:
+ result: Violation object list
+
+ """
+ # Initialize key variables
+ violations = []
+ bad_argument_function = False
+ bad_argument_docstring = False
+ arguments_function = function.arguments
+ arguments_docstring = [_.arg_name for _ in docstring.parser.params]
+
+ # Violation if the arguments don't match and return
+ if sorted(arguments_function) != sorted(arguments_docstring):
+ violations.append(
+ Violation(
+ line=line_number + 1,
+ function=function.name,
+ issue="""\
+The arguments defined in the docstring don't match those of the function.""",
+ action="""\
+Adjust your docstring to match the listed function arguments.""",
+ )
+ )
+ return violations
+
+ ######################################################################
+    # Logic below only works when both the function and docstring have args
+ ######################################################################
+
+ # Check whether docstring arguments match function arguments
+ for argument_function in arguments_function:
+ # Track whether the argument is defined
+ # in the docstring parameters
+ for argument_docstring in arguments_docstring:
+ if argument_docstring not in arguments_function:
+ violations.append(
+ Violation(
+ line=line_number + 1,
+ function=function.name,
+ issue=f"""\
+Argument '{argument_docstring}' defined in the docstring is not \
+an argument in the function""",
+ action=f"""\
+Remove argument '{argument_docstring}' from the docstring""",
+ )
+ )
+ bad_argument_function = True
+ break
+ if bad_argument_function:
+ break
+
+ # We found an error, no need to continue generating violations
+ if not bad_argument_function:
+ # Check whether docstring arguments match function arguments
+ for argument_docstring in arguments_docstring:
+ # Track whether the argument is defined
+ # in the function parameters
+ for argument_function in arguments_function:
+ if argument_function not in arguments_docstring:
+ violations.append(
+ Violation(
+ line=line_number + 1,
+ function=function.name,
+ issue=f"""\
+Argument '{argument_function}' defined in the function is not \
+an argument in the docstring""",
+                            action=f"""\
+Add argument '{argument_function}' to the Docstring""",
+ )
+ )
+ bad_argument_docstring = True
+ break
+ if bad_argument_docstring:
+ break
+
+ # Return
+ return violations
+
+
+def function_has_decorator(start, lines):
+    """Return the decorator line preceding a function, if any.
+
+ Args:
+ start: Starting line to process
+ lines: The file as a list of strings split by a new line separator
+
+ Returns:
+ result: The decorator line
+
+ """
+ # Initialize key variable
+ result = None
+
+ # Return
+ if start > 0:
+ previous_line = lines[start - 1].strip()
+ if previous_line.startswith("@"):
+ result = previous_line
+ return result
+
+
+def decorator_in_docstring_exception_list(item):
+    """Determine whether a decorator exempts a function from docstring checks.
+
+ Args:
+ item: Decorator to check
+
+ Returns:
+ result: True if an exception
+
+ """
+ # Initialize key variable
+ result = False
+ exceptions = ["@property"]
+ property_exceptions = ["setter", "getter"]
+
+ # Return
+ for exception in exceptions:
+ if exception in item.strip():
+ result = True
+ break
+
+ for exception in property_exceptions:
+ regex = f"^@[a-zA-Z0-9_]*.{exception}$"
+ if re.match(regex, item):
+ result = True
+ break
+
+ # Return
+ return result
+
+
+def extract_function_arguments(start, lines):
+ """Extract the arguments of a function read from a file.
+
+ Args:
+ start: Starting line to process
+ lines: List of lines in the file
+
+ Returns:
+ result: Function object
+
+ """
+ # Initialize key variables
+ func = ""
+ possibles = lines[start:]
+ arguments = []
+ Function = namedtuple("Function", "name arguments")
+ method_keywords = ["self", "cls"]
+
+ # Process the function
+ for line in possibles:
+ if bool(line) is False:
+ continue
+ elif ("'''" not in line) and ('"""' not in line):
+ func = f"{func}{line.strip()}"
+ else:
+ break
+
+ # Get the arguments
+ items = func.split("(")[1].split(",")
+ name = func.split()[1].split("(")[0].strip()
+ for item in items:
+ result = item.split(")")[0].split("=")[0].strip()
+ if bool(result):
+ # Sometimes arguments have colons. We need everything before.
+ arguments.append(result.split(":")[0].strip())
+
+ # Fix arguments for methods
+ for keyword in method_keywords:
+ if keyword in arguments:
+ arguments.remove(keyword)
+
+ # Return
+ result = Function(name=name, arguments=arguments)
+ return result
+
+
+def extract_docstring(func_name, line_number, lines):
+ """Extract the docstring from a list of lines read from a file.
+
+ Args:
+        func_name: Name of the function for the docstring
+        line_number: Line where the function starts
+        lines: The file as a list of strings split by a new line separator
+
+ Returns:
+ result: namedtuple containing the docstring, and status
+
+ """
+ # Initialize key variables
+ violations = []
+ parser = None
+ fatal = False
+ Docstring = namedtuple(
+ "Docstring", "violations docstring parser arguments fatal"
+ )
+ docstring = ""
+ arguments = []
+ found_start = False
+ found_end = False
+
+ # Process Docstring
+ docstring_start = line_number
+ while docstring_start < len(lines):
+ if bool(is_docstring_delimiter(lines[docstring_start])) is False:
+ docstring_start += 1
+ else:
+ found_start = True
+ break
+
+ # Identify the start of the Docstring
+ if bool(found_start) is True:
+ # Identify the end of the docstring
+ docstring_end = docstring_start + 1
+ while docstring_end < len(lines):
+ if bool(is_docstring_delimiter(lines[docstring_end])) is False:
+ docstring_end += 1
+ else:
+ found_end = True
+ break
+
+ # Check to make sure there are defined arguments
+ if bool(found_end) is False:
+ violations.append(
+ Violation(
+ line=line_number + 1,
+ function=func_name,
+ issue="""\
+Single line docstring without 'Args:' or 'Returns:' sections defined.""",
+                    action="""Define the 'Args:' or 'Returns:' sections.""",
+ )
+ )
+ fatal = True
+
+ # Extract lines within the docstring area
+ if found_start and found_end:
+
+ # Get the lines of the Docstring, strip hard returns
+ valid_lines = lines[docstring_start : docstring_end + 1]
+
+ # Convert the docstring lines to a string
+ docstring = "\n".join(valid_lines)
+
+ # Parse the docstring
+ try:
+ parser = parse(docstring)
+
+ except Exception as e:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="Docstring parsing error",
+ action=f"""\
+Ensure the docstring is properly formatted: {e}""",
+ )
+ )
+
+ # Evaluate Docstring description
+ docstring_evaluation = evaluate_docstring_description(
+ func_name, docstring_start, parser
+ )
+ if bool(docstring_evaluation):
+ violations.extend(docstring_evaluation)
+
+ # Evaluate the Args: section
+ argument_evaluation = evaluate_docstring_args(
+ func_name, docstring_start, docstring, parser
+ )
+ if bool(argument_evaluation.violations):
+ violations.extend(argument_evaluation.violations)
+ else:
+ # Update docstring arguments as they are valid
+ arguments = argument_evaluation.arguments
+
+ # Evaluate the Returns: section
+ bad_returns = evaluate_docstring_returns(
+ func_name, docstring_start, docstring, parser
+ )
+ if bool(bad_returns):
+ violations.extend(bad_returns)
+
+ else:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="Unclosed docstring",
+ action="""\
+Ensure the docstring is properly closed with triple quotes.""",
+ )
+ )
+
+ else:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="Missing docstring",
+ action="""\
+Add a Google-style docstring to describe this function.""",
+ )
+ )
+
+ # Return result
+ result = Docstring(
+ docstring=docstring,
+ violations=violations if bool(violations) else None,
+ parser=parser,
+ arguments=arguments,
+ fatal=fatal,
+ )
+ return result
+
+
+def evaluate_docstring_description(func_name, docstring_start, parser):
+ """Evaluate the Docstring description for validity.
+
+ Args:
+ func_name: Function name
+ docstring_start: Line in file on which the docstring starts
+ parser: Docstring parser
+
+ Returns:
+ violations: List of Violations objects
+
+ """
+ # Initialize key variables
+ violations = []
+
+    # Ensure there is a Docstring description
+ short_description = (
+ parser.short_description.strip().replace("'''", "").replace('"""', "")
+ )
+ if bool(short_description) is False:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="Docstring doesn't have a valid description",
+ action="""\
+Add a docstring description to the first line.""",
+ )
+ )
+
+ if bool(parser.blank_after_short_description) is False:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="\
+The Docstring's short description on the first line doesn't \
+have a blank line after it.",
+ action="""\
+Add the trailing blank line.""",
+ )
+ )
+
+ return violations
+
+
+def evaluate_docstring_args(func_name, docstring_start, docstring, parser):
+ """Evaluate the Docstring arguments for validity.
+
+ Args:
+ func_name: Function name
+ docstring_start: Line in file on which the docstring starts
+ docstring: Docstring
+ parser: Docstring parser
+
+ Returns:
+ result: DocstringEvaluation object
+
+ """
+ # Initialize key variables
+ DocstringEvaluation = namedtuple(
+ "DocstringEvaluation", "violations arguments"
+ )
+ violations = []
+ arguments = []
+ docstring_no_multiple_white_space = " ".join(docstring.split())
+
+ if "Args: None " in docstring_no_multiple_white_space:
+ return DocstringEvaluation(violations=violations, arguments=arguments)
+ else:
+ # Check for Args section
+ if "Args:" not in docstring:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="Missing 'Args' section",
+ action="""\
+Add an 'Args:' section listing the arguments this function accepts.""",
+ )
+ )
+ else:
+ # Ensure there is an Args section
+ if bool(parser.params) is False:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="Docstring doesn't have a valid 'Args:' section",
+ action="""\
+Add an 'Args:' section with values to the function's docstring""",
+ )
+ )
+ else:
+ # Evaluate each argument
+ for argument in parser.params:
+ if bool(argument.arg_name) is False:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="""\
+Docstring has no 'Args:' section variable name and description.""",
+ action="""\
+Add an 'Args:' section with a variable name and description to \
+the function's docstring""",
+ )
+ )
+ if bool(argument.description) is False:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue=f"""\
+Docstring 'Args:' section variable '{argument.arg_name}' \
+needs a description.""",
+ action="Add description to the variable.",
+ )
+ )
+
+ # Get the valid arguments
+ if bool(violations) is False:
+ arguments = [_.arg_name for _ in parser.params]
+
+ # Return
+ result = DocstringEvaluation(violations=violations, arguments=arguments)
+ return result
+
+
+def evaluate_docstring_returns(func_name, docstring_start, docstring, parser):
+    """Evaluate the Docstring 'Returns:' section for validity.
+
+ Args:
+ func_name: Function name
+ docstring_start: Line in file on which the docstring starts
+ docstring: Docstring
+ parser: Docstring parser
+
+ Returns:
+ violations: list of violations
+
+ """
+ # Initialize key variables
+ violations = []
+ docstring_no_multiple_white_space = " ".join(docstring.split())
+
+ # Check for Returns section
+ if "Returns:" not in docstring:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="Missing 'Returns:' section",
+ action="""\
+Add a 'Returns:' section describing the return value.""",
+ )
+ )
+ elif "Returns: None " not in docstring_no_multiple_white_space:
+
+ # The parser fails if the 'Args:' section is set to None AND there
+ # is a valid 'Returns:' section
+ # This is a workaround where we search for 'Returns: VARIABLE: '
+ regex = r"^.*\s+Returns: (\S+): ([a-zA-Z0-9_]*).*$"
+ regex_match = re.match(regex, docstring_no_multiple_white_space)
+ if bool(parser.params) is False:
+ if bool(regex_match) is False:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="""\
+Docstring has improperly formatted 'Returns:' section""",
+ action="""\
+Add a correctly formatted 'Returns:' section to the function's docstring""",
+ )
+ )
+ else:
+ if bool(regex_match.group(2)) is False:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="""\
+Docstring 'Returns:' section with no description""",
+ action="""\
+Add a description to the 'Returns:' section to the function's docstring""",
+ )
+ )
+ return violations
+
+    # Ensure there is a 'Returns:' section
+ if bool(parser.returns) is False:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="Docstring has no 'Returns:' section",
+ action="""\
+Add a 'Returns:' section to the function's docstring""",
+ )
+ )
+ return violations
+
+    # Ensure there is a 'Returns:' section value
+ if bool(parser.returns.type_name) is False:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue="""\
+Docstring has no 'Returns:' section variable name and description. \
+If the return value is 'None', then use 'None'""",
+ action="""\
+Add a 'Returns:' section with a variable name and description to \
+the function's docstring""",
+ )
+ )
+
+ elif bool(parser.returns.description) is False:
+ violations.append(
+ Violation(
+ line=docstring_start,
+ function=func_name,
+ issue=f"""\
+Docstring 'Returns:' section variable \
+'{parser.returns.type_name}' needs a description.""",
+ action="""Add description to the variable.""",
+ )
+ )
+
+ # Return
+ return violations
+
+
+def is_docstring_delimiter(line):
+ """Determine whether string is docstring start or stop.
+
+ Args:
+ line: String of text
+
+ Returns:
+ result: True if it's a delimiter
+
+ """
+ # Return
+ result = bool(
+ line.strip().startswith('"""') or line.strip().startswith("'''")
+ )
+ return result
+
+
+def check_directory(directory, exclude_dirs=None):
+ """Check all Python files in a directory for docstring compliance.
+
+ Specified directories are excluded.
+
+ Args:
+ directory (str): Directory to scan.
+ exclude_dirs (list): List of directories to exclude.
+
+ Returns:
+ dict: Dictionary of file violations.
+ """
+ # Initialize key variables
+ all_violations = {}
+ _exclude_dirs = exclude_dirs if bool(exclude_dirs) else []
+
+ # Recursive directory search for files
+ for root, dirs, files in os.walk(directory):
+ # Skip excluded directories
+ dirs[:] = [
+ d for d in dirs if os.path.join(root, d) not in _exclude_dirs
+ ]
+
+ # Process files in each directory
+ for file in files:
+ if file.endswith(".py"):
+ # Print start of processing
+ file_path = os.path.join(root, file)
+
+ # Identify violations in the file
+ violations = validate_docstring(file_path)
+
+ # Add any found violations
+ if violations:
+ all_violations[file_path] = violations
+
+ # Return
+ return all_violations
+
+
+def main():
+ """Start checking the docstrings.
+
+ Args:
+ None
+
+ Returns:
+ None
+ """
+ # Header for the help menu of the application
+ parser = argparse.ArgumentParser(
+ description="""\
+This script checks specified directories for compliance with the \
+Google Docstring 'Args' and 'Returns' sections.""",
+ formatter_class=argparse.RawTextHelpFormatter,
+ )
+
+ # CLI argument for starting
+ parser.add_argument(
+ "--directories",
+ required=False,
+ default=".",
+ nargs="+",
+ type=str,
+        help="Directories to scan for docstring-compliant Python files.",
+ )
+ args = parser.parse_args()
+
+ # Process the directories
+ for directory in args.directories:
+ # Identify violations
+ violations = check_directory(directory, exclude_dirs=None)
+
+ # Create a message for the violation
+ if violations:
+ print("")
+ for file, issues in sorted(violations.items()):
+ for issue in issues:
+ print(
+ f"""\
+File Docstring Error: {file}
+Line : {issue.line}
+Function: {issue.function}
+Issue: {issue.issue}
+Corrective Action: {issue.action}
+"""
+ )
+ print(
+ f"""\
+Follow the online 'Google Python Style Guide' for our docstring expectations.
+There are {len(violations)} docstring violations
+"""
+ )
+
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.github/workflows/scripts/code_coverage_disable_check.py b/.github/workflows/scripts/code_coverage_disable_check.py
index 1a55c3f720..f47b24fb21 100644
--- a/.github/workflows/scripts/code_coverage_disable_check.py
+++ b/.github/workflows/scripts/code_coverage_disable_check.py
@@ -8,7 +8,7 @@
This script enforces proper code coverage practices in the project.
-NOTE:
+Note:
This script complies with our python3 coding and documentation standards.
It complies with:
@@ -27,8 +27,7 @@
def has_code_coverage_disable(file_path):
- """
- Check if a TypeScript file contains code coverage disable statements.
+ """Check if a TypeScript file contains code coverage disable statements.
Args:
file_path (str): Path to the TypeScript file.
@@ -38,10 +37,11 @@ def has_code_coverage_disable(file_path):
otherwise.
"""
code_coverage_disable_pattern = re.compile(
- r"""//?\s*istanbul\s+ignore(?:\s+(?:next|-line))?[^\n]*|
- /\*\s*istanbul\s+ignore\s+(?:next|-line)\s*\*/""",
+ r"/\*\s*istanbul\s+ignore"
+ r".*?\*/|//?\s*istanbul\s+ignore(?:\s+(?:next|-line))?[^\n]*",
re.IGNORECASE,
)
+
try:
with open(file_path, "r", encoding="utf-8") as file:
content = file.read()
@@ -58,8 +58,7 @@ def has_code_coverage_disable(file_path):
def check_code_coverage(files_or_dirs):
- """
- Check TypeScript files for code coverage disable statements.
+ """Check TypeScript files for code coverage disable statements.
Args:
files_or_dirs (list): List of files or directories to check.
@@ -88,7 +87,8 @@ def check_code_coverage(files_or_dirs):
file_path = os.path.join(root, file_name)
if has_code_coverage_disable(file_path):
print(
- f"""File {file_path} contains code coverage disable statement."""
+ f"""\
+File {file_path} contains code coverage disable statement."""
)
code_coverage_found = True
elif os.path.isfile(item):
@@ -103,7 +103,9 @@ def check_code_coverage(files_or_dirs):
):
if has_code_coverage_disable(item):
print(
- f"""File {item} contains code coverage disable statement. Please remove it and add the appropriate tests."""
+ f"""\
+File {item} contains code coverage disable statement. \
+Please remove it and add the appropriate tests."""
)
code_coverage_found = True
@@ -113,6 +115,9 @@ def check_code_coverage(files_or_dirs):
def arg_parser_resolver():
"""Resolve the CLI arguments provided by the user.
+ Args:
+ None
+
Returns:
result: Parsed argument object
"""
@@ -137,8 +142,7 @@ def arg_parser_resolver():
def main():
- """
- Execute the script's main functionality.
+ """Execute the script's main functionality.
This function serves as the entry point for the script. It performs
the following tasks:
@@ -148,6 +152,12 @@ def main():
3. Provides informative messages based on the analysis.
4. Exits with an error if code coverage disable statements are found.
+ Args:
+ None
+
+ Returns:
+ None
+
Raises:
SystemExit: If an error occurs during execution.
"""
diff --git a/.github/workflows/scripts/compare_translations.py b/.github/workflows/scripts/compare_translations.py
index ef65b6c52b..e1474d9ad5 100644
--- a/.github/workflows/scripts/compare_translations.py
+++ b/.github/workflows/scripts/compare_translations.py
@@ -1,12 +1,13 @@
"""Script to encourage more efficient coding practices.
+
Methodology:
Utility for comparing translations between default and other languages.
This module defines a function to compare two translations
and print any missing keys in the other language's translation.
-Attributes:
+Attributes:
FileTranslation : Named tuple to represent a combination
of file and missing translations.
@@ -37,7 +38,8 @@
Example:
python compare_translations.py
-NOTE:
+
+Note:
This script complies with our python3 coding and documentation standards
and should be used as a reference guide. It complies with:
@@ -47,6 +49,7 @@
4) Flake8
"""
+
# standard imports
import argparse
import json
@@ -56,19 +59,21 @@
# Named tuple for file and missing
# translations combination
-FileTranslation = namedtuple("FileTranslation",
- ["file", "missing_translations"])
+FileTranslation = namedtuple(
+ "FileTranslation", ["file", "missing_translations"]
+)
-def compare_translations(default_translation,
- other_translation, default_file, other_file):
- """Compare two translations and return detailed info about missing/mismatched keys.
+def compare_translations(
+ default_translation, other_translation, default_file, other_file
+):
+ """Compare two translations for missing and/or mismatched keys.
Args:
default_translation (dict): The default translation (en.json).
other_translation (dict): The other language translation.
default_file (str): The name of the default translation file.
- other_file (str): The name of the other
+ other_file (str): The name of the other
translation file.
Returns:
@@ -79,22 +84,27 @@ def compare_translations(default_translation,
# Check for missing keys in other_translation
for key in default_translation:
if key not in other_translation:
- error_msg = f"Missing Key: '{key}' - This key from '{default_file}' is missing in '{other_file}'."
+ error_msg = f"""\
+Missing Key: '{key}' - This key from '{default_file}' \
+is missing in '{other_file}'."""
errors.append(error_msg)
- # Check for keys in other_translation that don't match any in default_translation
+ # Check for keys in other_translation that don't
+ # match any in default_translation
for key in other_translation:
if key not in default_translation:
- error_msg = f"Error Key: '{key}' - This key in '{other_file}' does not match any key in '{default_file}'."
+ error_msg = f"""\
+Error Key: '{key}' - This key in '{other_file}' \
+does not match any key in '{default_file}'."""
errors.append(error_msg)
return errors
+
def flatten_json(nested_json, parent_key=""):
- """
- Flattens a nested JSON, concatenating keys to represent the hierarchy.
+    """Flatten a nested JSON, concatenating keys to represent the hierarchy.
Args:
nested_json (dict): The JSON object to flatten.
- parent_key (str): The base key for recursion (used to track key hierarchy).
+ parent_key (str): The base key for recursion to track key hierarchy.
Returns:
dict: A flattened dictionary with concatenated keys.
@@ -104,7 +114,7 @@ def flatten_json(nested_json, parent_key=""):
for key, value in nested_json.items():
# Create the new key by concatenating parent and current key
new_key = f"{parent_key}.{key}" if parent_key else key
-
+
if isinstance(value, dict):
# Recursively flatten the nested dictionary
flat_dict.update(flatten_json(value, new_key))
@@ -114,6 +124,7 @@ def flatten_json(nested_json, parent_key=""):
return flat_dict
+
def load_translation(filepath):
"""Load translation from a file.
@@ -154,7 +165,6 @@ def check_translations(directory):
languages = os.listdir(directory)
languages.remove("en") # Exclude default language directory
-
error_found = False
for language in languages:
@@ -166,7 +176,10 @@ def check_translations(directory):
# Compare translations and get detailed error messages
errors = compare_translations(
- default_translation, other_translation, f"en/{file}", f"{language}/{file}"
+ default_translation,
+ other_translation,
+ f"en/{file}",
+ f"{language}/{file}",
)
if errors:
error_found = True
@@ -174,7 +187,6 @@ def check_translations(directory):
for error in errors:
print(f" - {error}")
-
if error_found:
sys.exit(1) # Exit with an error status code
else:
@@ -183,26 +195,38 @@ def check_translations(directory):
def main():
- """
+ """Compare translations.
- Parse command-line arguments, check for the existence of the specified directory
- and call check_translations with the provided or default directory.
+ Parse command-line arguments, check for the existence of the specified
+ directory and call check_translations with the provided or default
+ directory.
+
+ Args:
+ None
+
+ Returns:
+ None
"""
+ # Initialize key variables
parser = argparse.ArgumentParser(
- description="Check and print missing translations for all non-default languages."
+ description="""\
+Check and print missing translations for all non-default languages."""
)
parser.add_argument(
"--directory",
type=str,
nargs="?",
default=os.path.join(os.getcwd(), "public/locales"),
- help="Directory containing translation files(relative to the root directory).",
+ help="""\
+Directory containing translation files (relative to the root directory).""",
)
args = parser.parse_args()
if not os.path.exists(args.directory):
- print(f"Error: The specified directory '{args.directory}' does not exist.")
+ print(
+ f"Error: The specified directory '{args.directory}' does not exist."
+ )
sys.exit(1)
check_translations(args.directory)
diff --git a/.github/workflows/scripts/countline.py b/.github/workflows/scripts/countline.py
index d0b03c503f..5c3ee5d117 100755
--- a/.github/workflows/scripts/countline.py
+++ b/.github/workflows/scripts/countline.py
@@ -10,8 +10,7 @@
This script was created to help improve code quality by encouraging
contributors to create reusable code.
-NOTE:
-
+Note:
This script complies with our python3 coding and documentation standards
and should be used as a reference guide. It complies with:
@@ -24,7 +23,6 @@
your pull requests.
"""
-
# Standard imports
import os
import sys
diff --git a/.github/workflows/scripts/eslint_disable_check.py b/.github/workflows/scripts/eslint_disable_check.py
index a24a80949e..45ce52b84a 100644
--- a/.github/workflows/scripts/eslint_disable_check.py
+++ b/.github/workflows/scripts/eslint_disable_check.py
@@ -10,7 +10,7 @@
This script enforces code quality practices in the project.
-NOTE:
+Note:
This script complies with our python3 coding and documentation standards.
It complies with:
@@ -28,8 +28,7 @@
def has_eslint_disable(file_path):
- """
- Check if a TypeScript file contains eslint-disable statements.
+ """Check if a TypeScript file contains eslint-disable statements.
Args:
file_path (str): Path to the TypeScript file.
@@ -37,9 +36,10 @@ def has_eslint_disable(file_path):
Returns:
bool: True if eslint-disable statement is found, False otherwise.
"""
+ # Initialize key variables
eslint_disable_pattern = re.compile(
- r"""\/\/\s*eslint-disable(?:-next-line
- |-line)?[^\n]*|\/\*\s*eslint-disable[^\*]*\*\/""",
+ r"\/\/\s*eslint-disable(?:-next-line"
+ r"|-line)?[^\n]*|\/\*\s*eslint-disable[^\*]*\*\/",
re.IGNORECASE,
)
@@ -59,8 +59,7 @@ def has_eslint_disable(file_path):
def check_eslint(files_or_directories):
- """
- Check TypeScript files for eslint-disable statements.
+ """Check TypeScript files for eslint-disable statements.
Args:
files_or_directories (list): List of files or directories to check.
@@ -75,7 +74,11 @@ def check_eslint(files_or_directories):
# If it's a file, directly check it
if item.endswith(".ts") or item.endswith(".tsx"):
if has_eslint_disable(item):
- print(f"File {item} contains eslint-disable statement. Please remove them and ensure the code adheres to the specified ESLint rules.")
+ print(
+ f"""\
+File {item} contains eslint-disable statement. Please remove it and \
+ensure the code adheres to the specified ESLint rules."""
+ )
eslint_found = True
elif os.path.isdir(item):
# If it's a directory, walk through it and check all
@@ -99,6 +102,8 @@ def check_eslint(files_or_directories):
def arg_parser_resolver():
"""Resolve the CLI arguments provided by the user.
+ Args: None
+
Returns:
result: Parsed argument object
"""
@@ -123,11 +128,17 @@ def arg_parser_resolver():
def main():
- """
- Execute the script's main functionality.
+ """Execute the script's main functionality.
+
+ Args:
+ None
+
+ Returns:
+ None
This function serves as the entry point for the script. It performs
the following tasks:
+
1. Validates and retrieves the files and directories to check from
command line arguments.
2. Recursively checks TypeScript files for eslint-disable statements.
diff --git a/.github/workflows/scripts/talawa_admin_md_mdx_format_adjuster.py b/.github/workflows/scripts/talawa_admin_md_mdx_format_adjuster.py
deleted file mode 100644
index cd76a30cf6..0000000000
--- a/.github/workflows/scripts/talawa_admin_md_mdx_format_adjuster.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env python3
-# -*- coding: UTF-8 -*-
-"""
-Script to make Markdown files MDX compatible.
-
-This script scans Markdown files and escapes special characters (<, >, {, })
-to make them compatible with the MDX standard used in Docusaurus v3.
-
-This script complies with:
- 1) Pylint
- 2) Pydocstyle
- 3) Pycodestyle
- 4) Flake8
-"""
-import os
-import argparse
-import re
-
-def escape_mdx_characters(text):
- """
- Escape special characters in a text string for MDX compatibility.
- Avoids escaping already escaped characters.
-
- Args:
- text: A string containing the text to be processed.
-
- Returns:
- A string with special characters (<, >, {, }) escaped, avoiding
- double escaping.
- """
- # Regular expressions to find unescaped special characters
- patterns = {
- "<": r"(?": r"(?",
- "{": r"(? 0) {
console.info(
'\x1b[34m%s\x1b[0m',
- '\nInfo: Consider using custom hook functions.',
+ '\nInfo: Consider using custom hook functions.'
);
console.info(
- 'Please use the getItem, setItem, and removeItem functions provided by the custom hook useLocalStorage.\n',
+ 'Please use the getItem, setItem, and removeItem functions provided by the custom hook useLocalStorage.\n'
);
process.exit(1);
diff --git a/src/App.tsx b/src/App.tsx
index fbb394bc9d..a12f3e7ceb 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -38,7 +38,7 @@ import Posts from 'screens/UserPortal/Posts/Posts';
import Organizations from 'screens/UserPortal/Organizations/Organizations';
import People from 'screens/UserPortal/People/People';
import Settings from 'screens/UserPortal/Settings/Settings';
-// import Chat from 'screens/UserPortal/Chat/Chat';
+import Chat from 'screens/UserPortal/Chat/Chat';
import { useQuery } from '@apollo/client';
import { CHECK_AUTH } from 'GraphQl/Queries/Queries';
import Advertisements from 'components/Advertisements/Advertisements';
@@ -191,8 +191,8 @@ function app(): JSX.Element {
}>
} />
} />
- {/* } /> */}
}>
+ } />
} />
} />
} />
diff --git a/src/components/EventCalendar/EventHeader.spec.tsx b/src/components/EventCalendar/EventHeader.spec.tsx
index be1ba4bd78..84b8ceafec 100644
--- a/src/components/EventCalendar/EventHeader.spec.tsx
+++ b/src/components/EventCalendar/EventHeader.spec.tsx
@@ -69,7 +69,7 @@ describe('EventHeader Component', () => {
fireEvent.click(getByTestId('eventType'));
await act(async () => {
- fireEvent.click(getByTestId('events'));
+ fireEvent.click(getByTestId('Events'));
});
expect(handleChangeView).toHaveBeenCalledTimes(1);
diff --git a/src/components/EventCalendar/EventHeader.tsx b/src/components/EventCalendar/EventHeader.tsx
index d338de3b82..9201e8b696 100644
--- a/src/components/EventCalendar/EventHeader.tsx
+++ b/src/components/EventCalendar/EventHeader.tsx
@@ -1,9 +1,10 @@
import React, { useState } from 'react';
-import { Button, Dropdown, Form } from 'react-bootstrap';
+import { Button, Form } from 'react-bootstrap';
import { Search } from '@mui/icons-material';
import styles from '../../style/app.module.css';
import { ViewType } from '../../screens/OrganizationEvents/OrganizationEvents';
import { useTranslation } from 'react-i18next';
+import SortingButton from 'subComponents/SortingButton';
/**
* Props for the EventHeader component.
@@ -63,58 +64,30 @@ function eventHeader({
-
-
-
- {viewType}
-
-
-
- {ViewType.MONTH}
-
-
- {ViewType.DAY}
-
-
- {ViewType.YEAR}
-
-
-
-
-
-
-
- {t('eventType')}
-
-
-
- Events
-
-
- Workshops
-
-
-
-
+
+
console.log(`Selected: ${value}`)}
+ dataTestIdPrefix="eventType"
+ className={styles.dropdown}
+ buttonLabel={t('eventType')}
+ />