My Little coding practice

Feature: Testing DBs Suite
This feature will test DBs

Scenario Outline: Testing DBs
Given Executing tests for the product=<product>, a=<a>, b=<b>, c=<c>, flag=<flag>
When converting a=<a> data to a
When converting a=<a> data to a
Then perform validation on the a
Examples:
| product | a | b | c | flag |
| ABC | LMN | XYZ | HIJ | Y |
| a | g | e | k | Y |
| d | j | h | e | Y |
| w | g | v | q | Y |




import pytest
from pytest_bdd import scenarios, given, when, then, parsers

# Load all scenarios from the feature file
# (pytest-bdd binds each Scenario Outline row to the step functions below).
scenarios("../features/dynamic_feature.feature")


# Given Steps
@given(parsers.parse("Executing tests for the product={product}, a={a}, b={b}, c={c}, flag={flag}"))
def account_initial_balance(product, a, b, c, flag):
    """Given-step: log the five values bound from the scenario's example row.

    Fixes the original, which declared five parameters all named ``a``
    (a SyntaxError), repeated ``{a}`` in the parse pattern, and printed an
    undefined name ``aa``.

    NOTE(review): the placeholder names here must match the Given line and
    the Examples header in the feature file — confirm they agree.
    """
    print("")
    print("the parameter is product=" + product)
    print("the parameter is a=" + a)
    print("the parameter is b=" + b)
    print("the parameter is c=" + c)
    print("the parameter is flag=" + flag)


# When Steps
@when(parsers.parse("converting a={a} data to a"))
def deposit(a):
    """When-step: echo the ``a`` value captured from the step text."""
    message = "the parameter is a" + a
    print(message)


@then(parsers.parse("perform validation on the a"))
def account_balance_should_be():
    """Then-step: placeholder hook where the real validation belongs."""
    note = "perform validation on the DataFrame"
    print(note)




pytest --cucumberjson=reports.json -v --gherkin-terminal-reporter --capture=no --html=reports.html --log-cli-level=DEBUG



=============================================================
Behave
=============================================================


# Input list, can be dynamic or come from other sources (like an API or a file)
# Fixes the original, which bound this to the name `dict` (shadowing the
# builtin) and repeated the key "aaa" four times, so the literal silently
# collapsed to a single-key dict. Renamed to `data_frame` to match the
# `from test_data import data_frame` usage elsewhere in the tutorial.
data_frame = [
    {"col1": "aaa", "col2": "aaa", "col3": "aaa", "col4": "aaa"},
]

# Backward-compatible alias for the original (builtin-shadowing) name.
# NOTE(review): drop this once no caller references `dict` anymore.
dict = data_frame


# generate_feature_file.py

import os
from test_data import data_frame


def generate_dynamic_feature(rows=None):
    """Write a Gherkin feature file whose Examples table is built from *rows*.

    Args:
        rows: iterable of dicts with keys ``col1``..``col4``; defaults to the
            module-level ``data_frame`` imported from ``test_data``.

    Returns:
        The path of the generated feature file.

    Fixes the original, which iterated an undefined name ``a``, read the same
    key ``item['a']`` four times, and emitted five identical ``a`` column
    headers (Gherkin requires distinct placeholder names).
    """
    if rows is None:
        rows = data_frame  # module-level test data from test_data

    feature_content = """
Feature: Dynamic test case generation

Scenario Outline: Test case <index>
Given we have the input data
When running test cases with <index>, <col1>, <col2>, <col3>, <col4>
Then the output should match the expected result

Examples:
| index | col1 | col2 | col3 | col4 |
"""

    # Dynamically generate the Examples section based on the input rows.
    examples = "\n".join(
        f"| {index} | {item['col1']} | {item['col2']} | {item['col3']} | {item['col4']} |"
        for index, item in enumerate(rows)
    )

    # Combine the base feature content with dynamically generated examples.
    feature_content += f"{examples}\n"

    # Ensure the target directory exists, then write the feature file.
    features_dir = os.path.join(os.path.dirname(__file__), "features")
    os.makedirs(features_dir, exist_ok=True)
    feature_file_path = os.path.join(features_dir, "dynamic_test.feature")

    with open(feature_file_path, "w") as feature_file:
        feature_file.write(feature_content)
    print(f"Feature file generated at: {feature_file_path}")
    return feature_file_path

# Fixes the original, which called generate_dynamic_feature() unconditionally
# at import time AND again under the __main__ guard, writing the file twice
# (and once even when merely imported).
if __name__ == "__main__":
    generate_dynamic_feature()



Feature: Dynamic test case generation

Scenario Outline: Test case <index>
Given we have the input data
When running test cases with <index>, <col1>, <col2>, <col3>, <col4>
Then the output should match the expected result

Examples:
| index | col1 | col2 | col3 | col4 |
| 0 | aaa | aaa | aaa | aaa |


# steps/test_steps.py
import logging
from behave import given, when, then
from test_data import test_cases, data_frame

# Configure root logging once at import time; module-level logger for steps.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@given('we have the input data')
def step_given_input_data(context):
    """Given-step: stash the module-level test data on the behave context."""
    context.test_cases = data_frame

@when('running test cases with {index}, {col1}, {col2}, {col3}, {col4}')
def step_when_process_input(context, index, col1, col2, col3, col4):
    """When-step: log each value captured from the scenario's example row.

    Fixes the original, which declared five parameters all named ``aaa``
    (a SyntaxError), repeated ``{aaa}`` in the step pattern, and printed the
    undefined names ``index``, ``aa``, and ``a``.

    NOTE(review): placeholder names must match the When line and Examples
    header in the feature file — confirm against the generated feature.
    """
    print(index)
    print(col1)
    print(col2)
    print(col3)
    print(col4)

@then('the output should match the expected result')
def step_then_output_should_match(context):
    """Then-step: placeholder for the real output validation."""
    note = "Step: I have a configured behave environment"
    print(note)




import os
import subprocess
import json
import sys
from behave import __main__ as behave_exe

def run_behave():
    """Run the Behave suite under ./features, writing a JSON report.

    Exits with status 1 if invoking Behave raises.
    """
    # Directory containing the feature files, relative to the working dir.
    feature_dir = os.path.join(os.getcwd(), 'features')

    # Command-line arguments handed straight to Behave's entry point.
    cli_args = [
        '--format=json',          # machine-readable output
        '--outfile=report.json',  # JSON report destination
        feature_dir,
    ]

    try:
        behave_exe.main(cli_args)
    except Exception as exc:
        print(f"An error occurred while running Behave: {exc}")
        sys.exit(1)
    else:
        print("Behave tests executed successfully.")

def pretty_print_json(file_path):
    """Read the JSON report at *file_path* and pretty-print it to stdout.

    Args:
        file_path: path to a JSON file (e.g. the Behave ``report.json``).

    Exits with status 1 if the file cannot be read or is not valid JSON.
    """
    try:
        # Keep the try body minimal: only the read/parse can legitimately fail.
        with open(file_path, 'r') as json_file:
            data = json.load(json_file)
    except (OSError, json.JSONDecodeError) as e:
        # Narrowed from a broad `except Exception`: only I/O and parse
        # failures are expected; anything else should surface as a traceback.
        print(f"Error reading or pretty printing JSON: {e}")
        sys.exit(1)
    else:
        print(json.dumps(data, indent=4))

if __name__ == '__main__':
    # Run the Behave suite (writes report.json), then pretty-print the report.
    run_behave()
    pretty_print_json('report.json')



















 

Comments

Popular Posts