* Smoke tests in nodejs

* Fixes

* oops

* Use a local repo instead of git repo

* Updated readme

* Update readme

* Linter fixes

* News entry

* Refactor

* Fixes

* Refactor

* UI tests

* CI pipeline changes

* ui tests

* No experiment and no insider build

* oops

* Rename

* Fixes

* Verbose logging

* Hack

* Verbose logging of ui tests

* Hacky test

* Disable experiments

* Oops

* Include progress message

* Capture screenshots

* Testing

* More smokes

* More screenshots

* Oops

* Use commands to max and min panels

* Fixes

* Fixes

* Fixes

* Add timeout

* More control over test environment

* Fixes to selector

* Fixes

* No need of comments

* Smoke in insider as well for testing purposes

* Fixes

* Fixes

* Comments

* Fixes
This commit is contained in:
Don Jayamanne 2019-09-28 01:11:44 +04:00 коммит произвёл GitHub
Родитель 1764e6673c
Коммит c30eeae9b6
Не найден ключ, соответствующий данной подписи
Идентификатор ключа GPG: 4AEE18F83AFDEB23
120 изменённых файлов: 20210 добавлений и 310 удалений

14
.gitignore поставляемый
Просмотреть файл

@ -1,7 +1,8 @@
.DS_Store
.huskyrc.json
out
node_modules
log.log
**/node_modules
*.pyc
*.vsix
**/.vscode/.ropeproject/**
@ -14,9 +15,10 @@ npm-debug.log
**/.mypy_cache/**
!yarn.lock
coverage/
.vscode-test/**
.vscode test/**
.vscode-smoke/**
cucumber-report.json
**/.vscode-test/**
**/.vscode test/**
**/.vscode-smoke/**
**/.venv*/
port.txt
precommit.hook
@ -25,6 +27,9 @@ pythonFiles/lib/**
debug_coverage*/**
languageServer/**
languageServer.*/**
!uitests/features/languageServer/**
!uitests/src/languageServer/**
!uitests/code/**/languageServer/**
bin/**
obj/**
.pytest_cache
@ -32,4 +37,5 @@ tmp/**
.python-version
.vs/
test-results.xml
uitests/out/**
!build/

6
.vscode/settings.json поставляемый
Просмотреть файл

@ -2,16 +2,22 @@
{
"files.exclude": {
"out": true, // set this to true to hide the "out" folder with the compiled JS files
"uitests/out": true,
"**/*.pyc": true,
".nyc_output": true,
"obj": true,
"bin": true,
"**/__pycache__": true,
"**/node_modules": true,
".vscode-test": false,
".vscode test": false,
"**/.mypy_cache/**": true,
"**/.ropeproject/**": true
},
"search.exclude": {
"out": true, // set this to false to include "out" folder in search results
"uitests/out": true,
"**/node_modules": true,
"coverage": true,
"languageServer*/**": true,
".vscode-test": true,

Просмотреть файл

@ -18,6 +18,7 @@ CODING_STANDARDS.md
CONTRIBUTING.md
CONTRIBUTING - LANGUAGE SERVER.md
coverconfig.json
cucumber-report.json
gulpfile.js
package.datascience-ui.dependencies.json
package-lock.json

Просмотреть файл

@ -14,8 +14,17 @@ jobs:
steps:
- template: ../steps/build.yml
- job: Lint
- job: Hygiene
pool:
vmImage: "macos-latest"
steps:
- template: ../steps/lint.yml
- template: ../steps/initialization.yml
parameters:
PythonVersion: $(PythonVersion)
workingDirectory: $(Build.SourcesDirectory)
compile: 'false'
installVSCEorNPX: 'false'
- bash: npx tslint --project tsconfig.json
displayName: "Lint"
workingDirectory: $(Build.SourcesDirectory)

Просмотреть файл

@ -0,0 +1,31 @@
# Compile and build uitests
jobs:
- job: Compile
pool:
vmImage: "macos-latest"
steps:
- template: ../steps/initialization.yml
parameters:
workingDirectory: $(Build.SourcesDirectory)/uitests
compile: 'true'
installVSCEorNPX: 'false'
- job: Hygiene
pool:
vmImage: "macos-latest"
steps:
- template: ../steps/initialization.yml
parameters:
workingDirectory: $(Build.SourcesDirectory)/uitests
compile: 'false'
installVSCEorNPX: 'false'
- bash: npx tslint --project tsconfig.json
displayName: "Lint"
workingDirectory: $(Build.SourcesDirectory)/uitests
- bash: npx prettier --check 'src/**/*.ts'
displayName: "Format"
workingDirectory: $(Build.SourcesDirectory)/uitests

Просмотреть файл

@ -0,0 +1,47 @@
# Overview:
# Generic jobs template to compile and build extension
jobs:
- job: UI_Test_Report
timeoutInMinutes: 90
pool:
vmImage: "macos-latest"
steps:
- template: ../steps/initialization.yml
parameters:
workingDirectory: $(Build.SourcesDirectory)/uitests
compile: 'true'
- bash: mkdir -p reports
workingDirectory: $(Build.SourcesDirectory)/uitests
displayName: "Create Reports Directory"
- task: DownloadBuildArtifacts@0
inputs:
buildType: "current"
allowPartiallySucceededBuilds: true
downloadType: "Specific"
itemPattern: "**/.vscode test/reports/cucumber_report_*.json"
downloadPath: "$(Build.SourcesDirectory)/uitests/reports"
displayName: "Restore Cucumber Reports"
condition: always()
- bash: node ./out/index.js report --jsonDir=./reports --htmlOutput=./reports
workingDirectory: $(Build.SourcesDirectory)/uitests
displayName: "Merge and generate report"
condition: always()
- task: CopyFiles@2
inputs:
sourceFolder: $(Build.SourcesDirectory)/uitests/reports
contents: "**"
targetFolder: $(Build.ArtifactStagingDirectory)
displayName: "Copy Report"
condition: always()
- task: PublishBuildArtifacts@1
inputs:
pathtoPublish: $(Build.ArtifactStagingDirectory)
artifactName: UIReport
displayName: "Publish Report"
condition: always()

Просмотреть файл

@ -25,18 +25,18 @@
# parameters:
# jobs:
# - test: "Smoke"
# tags: "--tags=@smoke"
# tags: "@smoke"
# - test: "Test"
# tags: "--tags=@test"
# tags: "@test"
# - test: "Terminal"
# tags: "--tags=@terminal"
# tags: "@terminal"
# ```
# Based on this sample, we're running 3 tests with the names `Smoke`, `Test`, and `Terminal`.
# The tags inside each test contain the arguments that need to be passed into behave.
# I.e. we're only testing BDD tests that contain the tag `@smoke`, `@test` & `@terminal` (as separate jobs).
# Please pass in just the `tags` arguments.
# Multiple tag values can be passed in as follows:
# tags: "--tags=@debug --tags=@remote"
# tags: "@debug and @remote"
# More information on --tags argument for behave can be found here:
# * https://behave.readthedocs.io/en/latest/tutorial.html#controlling-things-with-tags
# * https://behave.readthedocs.io/en/latest/tag_expressions.html
@ -52,7 +52,7 @@
# vscodeChannels: ['stable']
# jobs:
# - test: "Smoke"
# tags: "--tags=@smoke"
# tags: "@smoke"
# ignorePythonVersions: "3.6,3.5"
# ```
# Based on this sample, we're running 1 test with the name `Smoke`.
@ -73,7 +73,7 @@
# vscodeChannels: ['stable']
# jobs:
# - test: "Smoke"
# tags: "--tags=@smoke"
# tags: "@smoke"
# ignorePythonVersions: "3.6,3.5"
# ```
# Based on this sample, we're running 1 test with the name `Smoke`.
@ -93,36 +93,33 @@ parameters:
vscodeChannels: ['stable', 'insider']
pythonVersions: [
{
"version": "3.7",
"version": "3.7.4",
"displayName": "37",
"excludeTags": "--tags=~@python3.6 --tags=~@python3.5 --tags=~@python2"
"excludeTags": "not @python3.6 and not @python3.5 and not @python2"
},
{
"version": "3.6",
"displayName": "36",
"excludeTags": "--tags=~@python3.7 --tags=~@python3.5 --tags=~@python2"
"excludeTags": "not @python3.7 and not @python3.5 and not @python2 and not @noNeedToTestInAllPython"
},
{
"version": "3.5",
"displayName": "35",
"excludeTags": "--tags=~@python3.7 --tags=~@python3.6 --tags=~@python2"
"excludeTags": "not @python3.7 and not @python3.6 and not @python2 and not @noNeedToTestInAllPython"
},
{
"version": "2.7",
"displayName": "27",
"excludeTags": "--tags=~@python3.7 --tags=~@python3.5 --tags=~@python3"
"excludeTags": "not @python3.7 and not @python3.5 and not @python3 and not @noNeedToTestInAllPython"
}
]
jobs:
- job: UITest
dependsOn:
- Compile
- Build
# Remember, some tests can take easily an hour (the `tests` features take just around 1 hour).
timeoutInMinutes: 90
# Build our matrix (permutations of all environments & tests).
# Build our matrix (permutations of VS Code + Tests + Python + OS).
strategy:
matrix:
${{ each channel in parameters.vscodeChannels }}:
@ -134,24 +131,24 @@ jobs:
PythonVersion: ${{ py.version }}
VMImageName: "macos-latest"
VSCodeChannel: ${{ channel }}
Tags: ${{ format('{0} {1} --tags=~@win --tags=~@linux', job.tags, py.excludeTags) }}
Tags: ${{ format('{0} and {1} and not @win and not @linux', job.tags, py.excludeTags) }}
${{ if not(contains(coalesce(job.ignoreOperatingSystems, ''), 'win')) }}:
${{ format('Win{2}{0}{1}', py.displayName, job.test, channel) }}:
PythonVersion: ${{ py.version }}
VSCodeChannel: ${{ channel }}
VMImageName: "vs2017-win2016"
Tags: ${{ format('{0} {1} --tags=~@mac --tags=~@linux', job.tags, py.excludeTags) }}
Tags: ${{ format('{0} and {1} and not @mac and not @linux', job.tags, py.excludeTags) }}
${{ if not(contains(coalesce(job.ignoreOperatingSystems, ''), 'linux')) }}:
${{ format('Linux{2}{0}{1}', py.displayName, job.test, channel) }}:
PythonVersion: ${{ py.version }}
VSCodeChannel: ${{ channel }}
VMImageName: "ubuntu-latest"
Tags: ${{ format('{0} {1} --tags=~@mac --tags=~@win', job.tags, py.excludeTags) }}
Tags: ${{ format('{0} and {1} and not @mac and not @win', job.tags, py.excludeTags) }}
pool:
vmImage: $(VMImageName)
steps:
- template: uitest_phases.yml
- template: ../steps/uitest.yml

Просмотреть файл

@ -1,13 +0,0 @@
# Lint the source
steps:
- template: initialization.yml
parameters:
PythonVersion: $(PythonVersion)
workingDirectory: $(Build.SourcesDirectory)
compile: 'false'
installVSCEorNPX: 'false'
- bash: npx tslint --project tsconfig.json
displayName: "Lint"
workingDirectory: $(Build.SourcesDirectory)

Просмотреть файл

@ -14,15 +14,14 @@
# 9. Start xvfb - Start in-memory display server (for launching VSC).
# 10. Restore VSIX - VSIX has been built in another Job, download that from artifacts.
# 11. Copy VSIX - Copy the VSIX into root directory (test suite expects it to be in root - default setup).
# 12. Setup pyperclip dependency - We use pyperclip to copy text into clipboard buffer (see where this is used in code for info).
# 13. Download & install UI Test dependencies - Download & Install everything required for the UI tests.
# 14. Run Tests - Launch the UI tests in Python
# 15. Copy Reports
# 16. Copy Screenshots
# 17. Copy Extension Logs
# 18. Copy VSC Logs
# 19. Upload Reports - Upload as artifacts to Azure Devops
# 20. Test Results - Upload test results to Azure Devops
# 12. Compile - Npm compile
# 13. Run Tests - Launch the UI tests in Nodejs
# 14. Copy Reports
# 15. Copy Screenshots
# 16. Copy Extension Logs
# 17. Copy VSC Logs
# 18. Upload Reports - Upload as artifacts to Azure Devops
# 19. Test Results - Upload test results to Azure Devops
# -----------------------------------------------------------------------------------------------------------------------------
# Variables:
# -----------------------------------------------------------------------------------------------------------------------------
@ -32,7 +31,7 @@
# 2. Tags
# Mandatory.
# Contain the `--tags=....` arguments to be passed into behave to exclude certain tags.
# Multiple tags can be passed as `--tags=@smoke --tags=~@ignore1 --tags=~@another --tags=~@andMore`
# Multiple tags can be passed as `@smoke and not @ignore1 and @another and not @andMore`
# More information on --tags argument for behave can be found here:
# * https://behave.readthedocs.io/en/latest/tutorial.html#controlling-things-with-tags
# * https://behave.readthedocs.io/en/latest/tag_expressions.html
@ -41,95 +40,24 @@
# 4. VMImageName
# VM Image to be used (standard Azure Devops variable).
steps:
- bash: |
printenv
displayName: "Show all env vars"
condition: eq(variables['system.debug'], 'true')
- task: NodeTool@0
displayName: "Use Node $(NodeVersion)"
inputs:
versionSpec: $(NodeVersion)
- task: UsePythonVersion@0
displayName: "Setup Python $(PythonVersion) for extension"
inputs:
versionSpec: $(PythonVersion)
# Conda
- bash: echo "##vso[task.prependpath]$CONDA/bin"
displayName: Add conda to PATH
condition: and(succeeded(), not(eq(variables['agent.os'], 'Windows_NT')))
- powershell: Write-Host "##vso[task.prependpath]$env:CONDA\Scripts"
displayName: Add conda to PATH
condition: and(succeeded(), eq(variables['agent.os'], 'Windows_NT'))
# On Hosted macOS, the agent user doesn't have ownership of Miniconda's installation directory/
# We need to take ownership if we want to update conda or install packages globally
- bash: sudo chown -R $USER $CONDA
displayName: Take ownership of conda installation
condition: and(succeeded(), eq(variables['agent.os'], 'Darwin'))
- script: |
export CI_PYTHON_PATH=`which python`
echo '##vso[task.setvariable variable=CI_PYTHON_PATH]'$CI_PYTHON_PATH
displayName: "Setup CI_PYTHON_PATH for extension"
condition: and(succeeded(), not(eq(variables['agent.os'], 'Windows_NT')))
- powershell: |
$CI_PYTHON_PATH = (get-command python).path
Write-Host "##vso[task.setvariable variable=CI_PYTHON_PATH]$CI_PYTHON_PATH"
Write-Host $CI_PYTHON_PATH
displayName: "Setup CI_PYTHON_PATH for extension"
condition: and(succeeded(), eq(variables['agent.os'], 'Windows_NT'))
# Some tests need to have both 2.7 & 3.7 available.
# Also, use Python 3.7 to run the scripts that drive the ui tests.
# Order matters, currently active python version will be used to drive tests.
# Hence ensure 3.7 is setup last.
- task: UsePythonVersion@0
displayName: "Use Python 2.7"
displayName: 'Use Python 2.7'
inputs:
versionSpec: 2.7
- task: UsePythonVersion@0
displayName: "Use Python 3.7 (to drive tests)"
displayName: 'Use Python 3.7'
inputs:
versionSpec: 3.7
- task: Npm@1
displayName: "Use NPM $(NpmVersion)"
inputs:
command: custom
verbose: true
customCommand: "install -g npm@$(NpmVersion)"
- task: Npm@1
displayName: "npm ci"
inputs:
command: custom
verbose: true
customCommand: ci
- bash: |
echo AVAILABLE DEPENDENCY VERSIONS
echo Node Version = `node -v`
echo NPM Version = `npm -v`
echo Python Version = `python --version`
echo Gulp Version = `gulp --version`
condition: and(succeeded(), eq(variables['system.debug'], 'true'))
displayName: Show Dependency Versions
# https://code.visualstudio.com/api/working-with-extensions/continuous-integration#azure-pipelines
- bash: |
set -e
/usr/bin/Xvfb :10 -ac >> /tmp/Xvfb.out 2>&1 &
disown -ar
displayName: "Start xvfb"
condition: and(succeeded(), eq(variables['Agent.Os'], 'Linux'), not(variables['SkipXvfb']))
# Setup python environment in current path for extension to use.
- template: initialization.yml
parameters:
PythonVersion: $(PythonVersion)
workingDirectory: $(Build.SourcesDirectory)/uitests
compile: 'true'
- task: DownloadBuildArtifacts@0
inputs:
@ -142,104 +70,132 @@ steps:
- task: CopyFiles@2
inputs:
sourceFolder: "$(Build.SourcesDirectory)/VSIX"
targetFolder: $(Build.SourcesDirectory)
targetFolder: $(Build.SourcesDirectory)/uitests
displayName: "Copy VSIX"
condition: succeeded()
# pyperclip needs more dependencies installed on Linux
# See https://github.com/asweigart/pyperclip/blob/master/docs/introduction.rst
- bash: sudo apt-get install xsel
displayName: "Setup pyperclip dependency"
condition: and(succeeded(), eq(variables['Agent.Os'], 'Linux'))
# Run the UI Tests.
- bash: |
python -m pip install -U pip
python -m pip install --upgrade -r ./uitests/requirements.txt
python uitests download --channel=$(VSCodeChannel)
npm install -g vsce
python uitests install --channel=$(VSCodeChannel)
cd ./bootstrap/extension
npm run build
workingDirectory: $(Build.SourcesDirectory)/uitests
displayName: "Build Bootstrap Extension"
condition: succeeded()
- bash: node ./out/index.js download --channel=$(VSCodeChannel)
workingDirectory: $(Build.SourcesDirectory)/uitests
env:
DISPLAY: :10
AgentJobName: $(Agent.JobName)
displayName: "Download & Install UI Test Dependencies"
VSCODE_CHANNEL: $(VSCodeChannel)
displayName: 'Download VS Code $(VSCodeChannel)'
condition: succeeded()
- bash: node ./out/index.js install --channel=$(VSCodeChannel)
workingDirectory: $(Build.SourcesDirectory)/uitests
env:
VSCODE_CHANNEL: $(VSCodeChannel)
displayName: 'Install Extension(s)'
condition: succeeded()
# Setup python environment in current path for extension.
- task: UsePythonVersion@0
displayName: 'Setup Python $(PythonVersion) for extension'
inputs:
versionSpec: $(PythonVersion)
# On Hosted macOS, the agent user doesn't have ownership of Miniconda's installation directory/
# We need to take ownership if we want to update conda or install packages globally
- bash: sudo chown -R $USER $CONDA
displayName: Take ownership of conda installation
condition: and(succeeded(), eq(variables['agent.os'], 'Darwin'))
- script: |
export CI_PYTHON_PATH=`which python`
echo '##vso[task.setvariable variable=CI_PYTHON_PATH]'$CI_PYTHON_PATH
displayName: 'Setup CI_PYTHON_PATH for extension'
condition: and(succeeded(), not(eq(variables['agent.os'], 'Windows_NT')))
- powershell: |
$CI_PYTHON_PATH = (get-command python).path
Write-Host "##vso[task.setvariable variable=CI_PYTHON_PATH]$CI_PYTHON_PATH"
Write-Host $CI_PYTHON_PATH
displayName: 'Setup CI_PYTHON_PATH for extension'
condition: and(succeeded(), eq(variables['agent.os'], 'Windows_NT'))
# Conda
- bash: echo "##vso[task.prependpath]$CONDA/bin"
displayName: Add conda to PATH
condition: and(succeeded(), not(eq(variables['agent.os'], 'Windows_NT')))
- powershell: Write-Host "##vso[task.prependpath]$env:CONDA\Scripts"
displayName: Add conda to PATH
condition: and(succeeded(), eq(variables['agent.os'], 'Windows_NT'))
# Ensure reports folder exists.
- bash: mkdir -p './.vscode test/reports'
workingDirectory: $(Build.SourcesDirectory)/uitests
displayName: 'Create Reports folder'
condition: succeeded()
# Skip @skip tagged tests
# Always dump to a text file (easier than scrolling and downloading the logs separately).
# This way all logs are part of the artifact for each test.
- script: python uitests test --channel=$(VSCodeChannel) -- --format=pretty $(Tags) --tags=~@skip --logging-level=INFO --no-logcapture --no-capture -D python_path=$(CI_PYTHON_PATH) | tee '.vscode test/reports/behave_log.log'
# Passing CI_PYTHON_PATH on windows sucks, as `\` needs to be escaped. Just use env variable.
# node ./out/index.js test --verbose=true --channel=$(VSCodeChannel) --pythonPath=$(CI_PYTHON_PATH) -- --tags='$(Tags) and not @skip' --exit | tee './.vscode test/reports/log.log'
# Don't use `npm run test` (we need the exit code to propagate all the way up).
- bash: node ./out/index.js test --verbose=$(VERBOSE) --channel=$(VSCodeChannel) -- --tags='$(Tags) and not @skip'
workingDirectory: $(Build.SourcesDirectory)/uitests
env:
DISPLAY: :10
AgentJobName: $(Agent.JobName)
AZURE_COGNITIVE_ENDPOINT: $(AZURE_COGNITIVE_ENDPOINT)
AZURE_COGNITIVE_KEY: $(AZURE_COGNITIVE_KEY)
VSCODE_CHANNEL: $(VSCodeChannel)
CI_PYTHON_PATH: $(CI_PYTHON_PATH)
PYTHON_VERSION: $(PythonVersion)
failOnStderr: false
displayName: "Run Tests"
VERBOSE: $(system.debug)
displayName: 'Run Tests'
condition: succeeded()
# Write exit code to a text file, so we can read it and fail CI in a separate task (fail if file exists).
# CI doesn't seem to fail based on exit codes.
# We can't fail on writing to stderr either as python logs stuff there & other errors that can be ignored are written there.
- bash: |
FILE=uitests/uitests/uitest_failed.txt
if [[ -f "$FILE" ]];
then
echo "UI Tests failed"
exit 1
fi
displayName: "Check if UI Tests Passed"
condition: succeeded()
# Generate and publish results even if there are failures in previous steps.
- script: python uitests report
env:
AgentJobName: $(Agent.JobName)
displayName: "Generate Reports"
- task: CopyFiles@2
inputs:
sourceFolder: $(Build.SourcesDirectory)/uitests
contents: '.vscode test/reports/**'
targetFolder: $(Build.ArtifactStagingDirectory)
displayName: 'Copy Reports'
condition: always()
- task: CopyFiles@2
inputs:
contents: ".vscode test/reports/**"
sourceFolder: $(Build.SourcesDirectory)/uitests
contents: '.vscode test/screenshots/**'
targetFolder: $(Build.ArtifactStagingDirectory)
displayName: "Copy Reports"
displayName: 'Copy Screenshots'
condition: always()
- task: CopyFiles@2
inputs:
contents: ".vscode test/screenshots/**"
sourceFolder: $(Build.SourcesDirectory)/uitests
contents: '.vscode test/logs/**'
targetFolder: $(Build.ArtifactStagingDirectory)
displayName: "Copy Screenshots"
displayName: 'Copy Extension Logs'
condition: always()
- task: CopyFiles@2
inputs:
contents: ".vscode test/logs/**"
sourceFolder: $(Build.SourcesDirectory)/uitests
contents: '.vscode test/user/logs/**'
targetFolder: $(Build.ArtifactStagingDirectory)
displayName: "Copy Extension Logs"
condition: always()
- task: CopyFiles@2
inputs:
contents: ".vscode test/user/logs/**"
targetFolder: $(Build.ArtifactStagingDirectory)
displayName: "Copy VSC Logs"
displayName: 'Copy VSC Logs'
condition: always()
- task: PublishBuildArtifacts@1
inputs:
pathtoPublish: $(Build.ArtifactStagingDirectory)
artifactName: $(Agent.JobName)
displayName: "Upload Reports"
displayName: 'Upload Reports'
condition: always()
- task: PublishTestResults@2
displayName: "TestResults"
displayName: 'TestResults'
inputs:
testRunTitle: $(Agent.JobName)
testRunner: JUnit
testResultsFiles: "$(Build.SourcesDirectory)/.vscode test/reports/*.xml"
testResultsFiles: '$(Build.SourcesDirectory)/uitests/.vscode test/reports/*.xml'
condition: always()

Просмотреть файл

@ -20,7 +20,10 @@ stages:
jobs:
- template: templates/jobs/build_compile.yml
# - template: templates/jobs/smoke.yml
- stage: Build_UITests
dependsOn: []
jobs:
- template: templates/jobs/build_uitests.yml
- stage: Tests
dependsOn:
@ -266,39 +269,19 @@ stages:
steps:
- template: templates/test_phases.yml
- job: 'Smoke'
dependsOn: []
strategy:
matrix:
'Mac-Py3.7':
PythonVersion: '3.7'
VMImageName: 'macos-10.13'
TestsToRun: 'testSmoke'
NeedsPythonTestReqs: true
'Linux-Py3.7':
PythonVersion: '3.7'
VMImageName: 'ubuntu-16.04'
TestsToRun: 'testSmoke'
NeedsPythonTestReqs: true
'Win-Py3.7':
PythonVersion: '3.7'
VMImageName: 'vs2017-win2016'
TestsToRun: 'testSmoke'
NeedsPythonTestReqs: true
pool:
vmImage: $(VMImageName)
steps:
- template: templates/test_phases.yml
- stage: Smoke
dependsOn:
- Build
- Build_UITests
jobs:
- template: templates/jobs/smoke.yml
- stage: Reports
dependsOn:
# - Smoke_Tests
- Smoke
- Tests
condition: always()
jobs:
# - template: templates/jobs/merge_upload_uitest_report.yml
- template: templates/jobs/merge_upload_uitest_report.yml
- template: templates/jobs/coverage.yml

Просмотреть файл

@ -27,7 +27,10 @@ stages:
jobs:
- template: templates/jobs/build_compile.yml
# - template: templates/jobs/smoke.yml
- stage: Build_UITests
dependsOn: []
jobs:
- template: templates/jobs/build_uitests.yml
- stage: Tests
dependsOn:
@ -435,38 +438,18 @@ stages:
steps:
- template: templates/test_phases.yml
- job: 'Smoke'
dependsOn: []
strategy:
matrix:
'Mac-Py3.7':
PythonVersion: '3.7'
VMImageName: 'macos-10.13'
TestsToRun: 'testSmoke'
NeedsPythonTestReqs: true
'Linux-Py3.7':
PythonVersion: '3.7'
VMImageName: 'ubuntu-16.04'
TestsToRun: 'testSmoke'
NeedsPythonTestReqs: true
'Win-Py3.7':
PythonVersion: '3.7'
VMImageName: 'vs2017-win2016'
TestsToRun: 'testSmoke'
NeedsPythonTestReqs: true
pool:
vmImage: $(VMImageName)
steps:
- template: templates/test_phases.yml
- stage: Smoke
dependsOn:
- Build
- Build_UITests
jobs:
- template: templates/jobs/smoke.yml
- stage: Reports
dependsOn:
# - Smoke_Tests
- Smoke
- Tests
condition: always()
jobs:
# - template: templates/jobs/merge_upload_uitest_report.yml
- template: templates/jobs/merge_upload_uitest_report.yml
- template: templates/jobs/coverage.yml

Просмотреть файл

@ -13,43 +13,56 @@ schedules:
# Variables that are available for the entire pipeline.
variables:
PythonVersion: '3.7'
NodeVersion: '10.5.0'
NpmVersion: '6.10.3'
MOCHA_FILE: '$(Build.ArtifactStagingDirectory)/test-junit.xml' # All test files will write their JUnit xml output to this file, clobbering the last time it was written.
MOCHA_REPORTER_JUNIT: true # Use the mocha-multi-reporters and send output to both console (spec) and JUnit (mocha-junit-reporter).
VSC_PYTHON_FORCE_LOGGING: true # Enable this to turn on console output for the logger
VSC_PYTHON_LOG_FILE: '$(Build.ArtifactStagingDirectory)/pvsc.log'
- template: templates/globals.yml
jobs:
- template: templates/build_compile_jobs.yml
stages:
- stage: Build
jobs:
- template: templates/jobs/build_compile.yml
- template: templates/uitest_jobs.yml
parameters:
jobs:
- test: "Smoke"
tags: "--tags=@smoke"
# Smoke tests are cheap, so run them against all Python Versions.
- test: "Test"
tags: "--tags=@testing"
# We have python code that is involved in running/discovering tests.
# Hence test against all versions, until we have CI running for the Python code.
# I.e. when all test discovery/running is done purely in Python.
- test: "Terminal"
tags: "--tags=@terminal --tags=~@terminal.pipenv"
# No need to run tests against all versions.
# This is faster/cheaper, besides activation of terminals is generic enough
# not to warrant testing against all versions.
ignorePythonVersions: "3.6,3.5"
- test: "Debugging"
tags: "--tags=@debugging"
# No need to run tests against all versions.
# This is faster/cheaper, and these are external packages.
# We expect them to work (or 3rd party packages to test against all PY versions).
ignorePythonVersions: "3.6,3.5"
- test: "Jedi_Language_Server"
tags: "--tags=@ls"
# No need to run tests against all versions.
# This is faster/cheaper, and these are external packages.
# We expect them to work (or 3rd party packages to test against all PY versions).
ignorePythonVersions: "3.6,3.5"
- stage: Build_UITests
dependsOn: []
jobs:
- template: templates/jobs/build_uitests.yml
- stage: UITests
dependsOn:
- Build
- Build_UITests
jobs:
- template: templates/jobs/uitest.yml
parameters:
jobs:
- test: "Smoke"
tags: "@smoke"
# Smoke tests are cheap, so run them against all Python Versions.
- test: "Test"
tags: "@testing"
# We have python code that is involved in running/discovering tests.
# Hence test against all versions, until we have CI running for the Python code.
# I.e. when all test discovery/running is done purely in Python.
- test: "Terminal"
tags: "@terminal and not @terminal.pipenv"
# No need to run tests against all versions.
# This is faster/cheaper, besides activation of terminals is generic enough
# not to warrant testing against all versions.
ignorePythonVersions: "3.6,3.5"
- test: "Debugging"
tags: "@debugging"
# No need to run tests against all versions.
# This is faster/cheaper, and these are external packages.
# We expect them to work (or 3rd party packages to test against all PY versions).
ignorePythonVersions: "3.6,3.5"
- test: "Jedi_Language_Server"
tags: "@ls"
# No need to run tests against all versions.
# This is faster/cheaper, and these are external packages.
# We expect them to work (or 3rd party packages to test against all PY versions).
ignorePythonVersions: "3.6,3.5"
- stage: Reports
dependsOn:
- UITests
condition: always()
jobs:
- template: templates/jobs/merge_upload_uitest_report.yml

Просмотреть файл

@ -22,7 +22,10 @@ stages:
jobs:
- template: templates/jobs/build_compile.yml
# - template: templates/jobs/smoke.yml
- stage: Build_UITests
dependsOn: []
jobs:
- template: templates/jobs/build_uitests.yml
- stage: Tests
dependsOn:
@ -95,38 +98,18 @@ stages:
steps:
- template: templates/test_phases.yml
- job: 'Smoke'
dependsOn: []
strategy:
matrix:
'Mac-Py3.7':
PythonVersion: '3.7'
VMImageName: 'macos-10.13'
TestsToRun: 'testSmoke'
NeedsPythonTestReqs: true
'Linux-Py3.7':
PythonVersion: '3.7'
VMImageName: 'ubuntu-16.04'
TestsToRun: 'testSmoke'
NeedsPythonTestReqs: true
'Win-Py3.7':
PythonVersion: '3.7'
VMImageName: 'vs2017-win2016'
TestsToRun: 'testSmoke'
NeedsPythonTestReqs: true
pool:
vmImage: $(VMImageName)
steps:
- template: templates/test_phases.yml
- stage: Smoke
dependsOn:
- Build
- Build_UITests
jobs:
- template: templates/jobs/smoke.yml
- stage: Reports
dependsOn:
# - Smoke_Tests
- Smoke
- Tests
condition: always()
jobs:
# - template: templates/jobs/merge_upload_uitest_report.yml
- template: templates/jobs/merge_upload_uitest_report.yml
- template: templates/jobs/coverage.yml

Просмотреть файл

@ -0,0 +1 @@
Re-enabled smoke tests (refactored in `node.js` with [puppeteer](https://github.com/GoogleChrome/puppeteer)).

21
pvsc.code-workspace Normal file
Просмотреть файл

@ -0,0 +1,21 @@
{
"folders": [
{
"path": "."
},
{
"path": "uitests"
}
],
"settings": {
"typescript.tsdk": "./node_modules/typescript/lib",
"search.exclude": {
"**/node_modules/**": true,
"**/.vscode test/insider/**": true,
"**/.vscode test/stable/**": true,
"**/.vscode-test/insider/**": true,
"**/.vscode-test/stable/**": true,
"**/out/**": true
}
}
}

Просмотреть файл

@ -20,8 +20,6 @@
"noImplicitThis": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
// We don't worry about this one:
//"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true
},
"exclude": [
@ -32,6 +30,8 @@
"src/client/node_modules",
"src/server/src/typings",
"src/client/src/typings",
"build"
"src/smoke",
"build",
"uitests"
]
}

24
uitests/.gitignore поставляемый Normal file
Просмотреть файл

@ -0,0 +1,24 @@
@rerun*.txt
.DS_Store
out
**/node_modules
*.vsix
**/.vscode/.ropeproject/**
**/testFiles/**/.cache/**
*.noseids
.nyc_output
npm-debug.log
**/.mypy_cache/**
cucumber-report.json
.vscode-test/**
.vscode test/**
**/.venv*/
bin/**
obj/**
.pytest_cache
tmp/**
.python-version
.vs/
test-results.xml
uitests/out/**
!build/

6
uitests/.prettierrc.js Normal file
Просмотреть файл

@ -0,0 +1,6 @@
module.exports = {
semi: true,
singleQuote: true,
printWidth: 180,
tabWidth: 4
};

24
uitests/.vscode/launch.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,24 @@
// A launch configuration that compiles the extension and then opens it inside a new window
{
"version": "0.1.0",
"configurations": [
{
"type": "node",
"request": "launch",
"name": "UI Tests",
"program": "${workspaceFolder}/out/index.js",
"sourceMaps": true,
"outFiles": ["${workspaceFolder}/out/**/*.js"],
"args": [
"test",
"--pythonPath",
"/Users/donjayamanne/.pyenv/versions/3.7.3/bin/python",
"--",
// Change the tag `@wip` to what ever you want to run.
// Default is assumed to be something that's a work in progress (wip).
"--tags=@wip"
],
"skipFiles": ["<node_internals>/**"]
}
]
}

42
uitests/.vscode/settings.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,42 @@
// Place your settings in this file to overwrite default and user settings.
{
"files.exclude": {
"out": true,
"**/*.pyc": true,
".nyc_output": true,
"obj": true,
"bin": true,
"**/__pycache__": true,
"**/node_modules": true,
".vscode test": false,
".vscode-test": false,
"**/.mypy_cache/**": true,
"**/.ropeproject/**": true
},
"search.exclude": {
"out": true,
"**/node_modules": true,
"coverage": true,
"languageServer*/**": true,
".vscode-test": true,
".vscode test": true
},
"[python]": {
"editor.formatOnSave": true
},
"[typescript]": {
"editor.formatOnSave": true
},
"typescript.preferences.quoteStyle": "single",
"javascript.preferences.quoteStyle": "single",
"typescriptHero.imports.stringQuoteStyle": "'",
"prettier.tslintIntegration": true,
"cucumberautocomplete.skipDocStringsFormat": true,
"[javascript]": {
"editor.formatOnSave": true
},
"editor.codeActionsOnSave": {
"source.fixAll": true,
"source.fixAll.tslint": true
}
}

28
uitests/.vscode/tasks.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,28 @@
{
"version": "2.0.0",
"presentation": {
"echo": true,
"reveal": "always",
"focus": false,
"panel": "shared"
},
"tasks": [
{
"label": "Compile UI Tests",
"type": "npm",
"script": "compile",
"isBackground": true,
"problemMatcher": [
"$tsc-watch",
{
"base": "$tslint5",
"fileLocation": "relative"
}
],
"group": {
"kind": "build",
"isDefault": true
}
}
]
}

153
uitests/README.md Normal file
Просмотреть файл

@ -0,0 +1,153 @@
# VS Code Smoke Test
## Usage
```shell
$ # The step `npm run package` is required to ensure the 'ms-python-insiders.vsix' is available locally.
$ # You could instead just download this and dump into the working directory (much faster).
$ # npm run package # see notes above.
$ npm run compile-smoke
$ npm run smokeTest # Use the `-- --tags=@wip` argument to run specific tests.
$ npm run smokeTest -- --help # for more information (see src/smoke/src/cli.ts)
$ npm run smokeTest:report # To generate report (output is './vscode test/reports/report.html')
```
## Overview
* These are a set of UI tests for the Python Extension in VSC.
* The UI is driven using the same infrastructure as used by `VS Code` for their smoke tests.
* [BDD](https://en.wikipedia.org/wiki/Behavior-driven_development) is used to create the tests, and executed using [cucumberjs](https://github.com/cucumber/cucumber-js).
## How to run smoke tests?
Here are the steps involved in running the tests:
* Setup environment:
* Pull down `ms-python-extension.vsix` from Azure Pipeline.
* Download a completely fresh version of VS Code (`stable/insiders`. Defaults to `stable`).
(configurable using the `--channel=stable | --channel=insider`)
* Create a folder named `.vscode test` where test specific files will be created (reports, logs, VS Code, etc).
## How does it work?
* When launching VSC, we will launch it as a completely stand alone version of VSC.
* I.E. even if it is installed on the current machine, we'll download and launch a new instance.
* This new instance will not interfere with currently installed version of VSC.
* All user settings, etc will be in a separate directory (see `user` folder).
* VSC will not have any extensions. We are in control of what extensions are installed (see `.vscode test/extensions` folder).
* Automate VSC UI
* Use the VS Code smoke test API to automate the UI.
* The [BDD](https://en.wikipedia.org/wiki/Behavior-driven_development) tests are written and executed using [cucumberjs](https://github.com/cucumber/cucumber-js).
* Workspace folder/files
* Each [feature](https://docs.cucumber.io/gherkin/reference/#feature) can have its own set of files in the form of a github repo.
* Just add a tag with the path of the github repo url to the `feature`.
* When starting the tests for a feature, the repo is downloaded into a new random directory `.vscode test/temp/workspace folder xyz`
* At the beginning of every scenario, we repeat the previous step.
* This ensures each scenario starts with a clean workspace folder.
* Reports
* Test results are stored in the `.vscode test/reports` directory
* These `json` (`cucumber format`) report files are converted into HTML using an `npm` script [cucumber-html-reporter](https://www.npmjs.com/package/cucumber-html-reporter).
* For each `scenario` that's executed, we create a corresponding directory in `.vscode test/reports` directory.
* This will contain all screenshots related to that scenario.
* If the scenario fails, all logs, workspace folder are copied into this directory.
* Thus, when ever a test fails, we have everything related to that test.
* If the scenario passes, this directory is deleted (we don't need them on CI server).
## Technology
* 100% of the code is written in `nodejs`.
* The tests are written using [cucumberjs](https://github.com/cucumber/cucumber-js).
* VS Code [smoke tests API](https://github.com/microsoft/vscode/tree/master/test/smoke) is used to automate VS Code.
* `GitHub` repos are used to provide the files to be used for testing in a workspace folder.
* reports (`cucumber format`) are converted into HTML using an `npm` script [cucumber-html-reporter](https://www.npmjs.com/package/cucumber-html-reporter).
* Test result reports are generated using `junit` format, for Azure Devops.
## Files & Folders
* `~/vscode test` Directory used for storing everything related to a test run (VS Code, reports, logs, etc).
* `./stable` This is where VS Code stable is downloaded.
* `./insider` This is where VS Code insider is downloaded.
* `./user` Directory VS Code uses to store user information (settings, etc)
* `./extensions` This is where the extensions get installed for the instance of VSC used for testing.
* `./workspace folder` Folder opened in VS Code for testing
* `./temp path` Temporary directory for testing. (sometimes tests will create folders named `workspace folder xyz` to be used as workspace folders used for testing)
* `./reports` Location where generated reports are stored.
* `./logs` Logs for tests
* `./screenshots` Screen shots captured during tests
* `~/src/uitest/bootstrap` Contains just the bootstrap extension code.
* `~/src/uitests/features` [Feature files](https://cucumber.io/docs/gherkin/reference/#feature) used to drive the [BDD](https://en.wikipedia.org/wiki/Behavior-driven_development) tests are stored here.
* `~/src/uitests/src` Source code for smoke Tests (features, nodejs code, etc).
* `~/code/` Folder containing workspaces (python files) used for testing purposes.
## CI Integration
* For more details please check `build/ci`.
* We generally try to run all tests against all permutations of OS + Python Version + VSC
* I.e. we run tests across permutations of the follows:
- OS: Windows, Mac, Linux
- Python: 2.7, 3.5, 3.6, 3.7
- VSC: Stable, Insiders
* Each scenario is treated as a test
- These results are published on Azure Devops
- Artifacts are published containing a folder named `.vscode test/reports/<scenario name>`
- This folder contains all information related to that test run:
- Screenshots (including the point in time the test failed) for every step in the scenario (sequentially named files)
- VS Code logs (including output from the output panels)
- The workspace folder that was opened in VSC code (we have the exact files used by VSC)
- Our logs (Extension logs, debugger logs)
- Basically we have everything we'd need to diagnose the failure.
* The report for the entire run is uploaded as part of the artifact for the test job.
- The HTML report contains test results (screenshots & all the steps).
* The same ui tests are run as smoke tests as part of a PR.
## Caveats
* The tests rely on the structure of the HTML elements (& their corresponding CSS/style attribute values).
- Basically we have hardcoded the CSS queries. If VS Code were to change these, then the tests would fail.
- One solution is to pin the UI tests against a stable version of VS Code.
- When ever a new version of VS Code is released, then move CSS queries from `insider` into `stable` found in the `src/uitests/src/selectors.ts` file.
- This way tests/CI will not fail and we'll have time to address the CSS/HTML changes.
## Miscellaneous
* For debugging follow these steps:
* Run the npm command `smokeTest:debug`
* Then attach the debugger using the debug configuration `Attach to Smoke Tests`.
* What about regular debugging?
* It has been observed that the instance of VSC launched for smoke tests just falls over when debugging from within VSC.
* Solution: Launch code in debug mode and attach (yes this works).
* Not entirely sure why it works, or why it doesn't work.
* Got a solution, hence not investing much more time trying to identify why debugging is failing.
* In order to pass custom arguments to `cucumberjs`, refer to the `CLI` (pass `cucumber` specific args after `--` in `npm run smokeTest`).
* E.g. `npm run smokeTest -- --tags=@wip --more-cucumberjs-args`
* Remember, the automated UI interactions can be faster than normal user interactions.
* E.g. just because we started debugging (using command `Debug: Start Debugging`), that doesn't mean the debug panel will open immediately. User interactions are slower compared to code execution.
* Solution, always wait for the UI elements to be available/active. E.g. when you open a file, check whether the corresponding elements are visible.
## Code Overview
* Tests are written in nodejs. Why?
* Short answer - We're using the VS Code Smoke test infrastructure.
* Previously we wrote tests using `selenium`. However a week after the tests were running, VSC released a new version. This new version of VSC had a version of Electron + Chromium that didn't have a compatible version of `chrome driver`.
* The `chrome driver` is used by `selenium` to drive the tests. Also using `selenium` we had tonnes of issues.
* Solution - Use the same technique used by VS Code to drive their UI Tests.
* Code borrowed from VS Code ([src/smoke/vscode](https://github.com/microsoft/vscode-python/tree/master/src/smoke/vscode)).
* Short answer - We're using the VS Code Smoke test infrastructure (this is where that code resides).
* The code in [src/smoke/vscode](https://github.com/microsoft/vscode-python/tree/master/src/smoke/vscode) code has been borrowed from [VS Code Smoke tests](https://github.com/microsoft/vscode/tree/master/test/smoke).
* This contains the code required to launch VS Code and drive some tests.
* Rather than picking and choosing some files, we've copied the entire source folder.
* This makes it easy to update this code with later versions of changes from upstream VS Code.
* We could optionally package this into a separate `npm package` and pull it in for testing purposes, however that adds the overhead of maintaining an `npm package`.
* There's also the option of creating a separate repo and publishing this code into an internal package repository (`GitHub` or `Azure Pipelines`).
* To be discussed
* Bootstrap extension ([src/smoke/bootstrap](https://github.com/microsoft/vscode-python/tree/master/src/smoke/bootstrap))
* Short answer - Used to update the `settings.json` and detect loading of `Python Extension`.
* When updating settings in VSC, do not alter the settings files directly. VSC could take a while to detect file changes and load the settings.
- An even better way, is to use the VSC api to update the settings (via the bootstrap API) or edit the settings file directly through the UI.
- Updating settings through the editor (by editing the `settings.json` file directly is not easy, as its not easy to update/remove settings).
- Using the API we can easily determine when VSC is aware of the changes (basically when API completes, VSC is aware of the new settings).
- (This is made possible by writing the settings to be updated into `settingsToUpdate.txt`, and letting the bootstrap extension read that file and update the VSC settings using the VSC API).
* Similarly checking whether the `Python Extension` has activated is done by the `bootstrap` extension by creating a new status bar item
* The presence of this new status bar indicates the fact that the extension has activated successfully.
* The code for this extension resides in [src/smoke/bootstrap](https://github.com/microsoft/vscode-python/tree/master/src/smoke/bootstrap)

Просмотреть файл

@ -0,0 +1,10 @@
.vscode/**
.vscode-test/**
out/test/**
out/**/*.map
src/**
.gitignore
tsconfig.json
vsc-extension-quickstart.md
tslint.json
*.vsix

Просмотреть файл

@ -0,0 +1,205 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
Object.defineProperty(exports, '__esModule', { value: true });
const vscode = require('vscode');
const fs = require('fs');
const path = require('path');
const util = require('util');
let activated = false;
/**
 * Resolves after the given number of milliseconds.
 *
 * @param {number} timeout Delay in milliseconds.
 * @returns {Promise<void>} Promise that resolves once the delay has elapsed.
 */
async function sleep(timeout) {
    return new Promise(resolve => {
        setTimeout(() => resolve(), timeout);
    });
}
/**
 * Entry point for the bootstrap extension used by the Python extension's
 * UI (smoke) tests.
 *
 * Responsibilities (driven via custom `smoketest.*` commands):
 *  - Show status bar items the tests use as DOM markers (activation state and
 *    the active editor's line,column position).
 *  - `smoketest.activatePython`: activate the Python extension, wait for it to
 *    be ready, then show a marker status bar item.
 *  - `smoketest.viewLanguageServerOutput`: retry the Python extension's command
 *    until the extension has registered it.
 *  - `smoketest.runInTerminal`: send a command (read from `commands.txt`) to
 *    the active terminal.
 *  - `smoketest.updateSettings`: update/remove VS Code settings described in
 *    `settingsToUpdate.txt` via the configuration API.
 *  - `smoketest.openFile`: open the file named in `commands.txt`.
 *  - `smoketest.stopDebuggingPython`: stop any active debug session.
 *
 * @param {*} context Extension context (used to register disposables).
 */
function activate(context) {
    const statusBarItemActivated = vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Left, 10000000);
    const lineColumnStatusBarItem = vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Left, 10000000);
    const statusBarItem = vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Left, 10000000);
    statusBarItem.command = 'workbench.action.quickOpen';
    statusBarItem.text = '1';
    statusBarItem.tooltip = 'Py';
    statusBarItem.show();
    lineColumnStatusBarItem.command = 'workbench.action.quickOpen';
    lineColumnStatusBarItem.text = '';
    lineColumnStatusBarItem.tooltip = 'PyLine';
    lineColumnStatusBarItem.show();
    context.subscriptions.push(statusBarItem);
    context.subscriptions.push(lineColumnStatusBarItem);
    // Always display the editor's line,column in this status bar item.
    // Sometimes we cannot detect the line,column of the editor (because that
    // item in the status bar is not visible due to lack of real estate).
    // This will get around that problem.
    vscode.window.onDidChangeTextEditorSelection(e => {
        try {
            lineColumnStatusBarItem.text = `${e.textEditor.selection.start.line + 1},${e.textEditor.selection.start.character + 1}`;
        } catch {}
    });
    let lastSetText = '';
    let interval = undefined;
    // Poll the active editor every 500ms and mirror its cursor position into
    // the status bar item (selection events alone can be missed when the
    // active/visible editors change).
    function monitorEditor() {
        clearInterval(interval);
        interval = setInterval(() => {
            if (vscode.window.activeTextEditor) {
                try {
                    const newText = `${vscode.window.activeTextEditor.selection.start.line + 1},${vscode.window.activeTextEditor.selection.start.character + 1}`;
                    if (lastSetText === newText) {
                        return;
                    }
                    lastSetText = lineColumnStatusBarItem.text = newText;
                } catch {}
            }
        }, 500);
    }
    vscode.window.onDidChangeActiveTextEditor(monitorEditor);
    vscode.window.onDidChangeVisibleTextEditors(monitorEditor);
    vscode.commands.registerCommand('smoketest.activatePython', async () => {
        if (activated) {
            return;
        }
        const ext = vscode.extensions.getExtension('ms-python.python');
        if (!ext.isActive) {
            await ext.activate();
            console.log('Bootstrap extension');
            console.log('ext.exports');
            console.log(ext.exports);
            // Wait for the Python extension to complete its own startup work.
            await ext.exports.ready;
        }
        statusBarItemActivated.text = '2';
        statusBarItemActivated.tooltip = 'Py2';
        // Don't remove this command, else the CSS selector for this will be different.
        // VSC will render a span if there's no span.
        statusBarItemActivated.command = 'workbench.action.quickOpen';
        statusBarItemActivated.show();
        activated = true;
        context.subscriptions.push(statusBarItemActivated);
    });
    vscode.commands.registerCommand('smoketest.viewLanguageServerOutput', async () => {
        // Keep trying until the command can be executed without any errors.
        // If there are errors, this means the command hasn't (yet) been
        // registered by the Python extension.
        for (let i = 0; i < 100000; i += 1) {
            // BUGFIX: `sleep(10)` was previously not awaited (busy loop).
            await sleep(10);
            const success = await new Promise((resolve, reject) => vscode.commands.executeCommand('python.viewLanguageServerOutput').then(resolve, reject))
                .then(() => true)
                .catch(() => false);
            // BUGFIX: previously the loop never exited on success and kept
            // re-executing the command until the iteration limit was reached.
            if (success) {
                return;
            }
        }
    });
    vscode.commands.registerCommand('smoketest.runInTerminal', async () => {
        const filePath = path.join(__dirname, '..', 'commands.txt');
        const command = fs
            .readFileSync(filePath)
            .toString()
            .trim();
        // Wait (up to ~25s) for a terminal to become active.
        for (let counter = 0; counter < 5; counter++) {
            if (!vscode.window.activeTerminal) {
                await sleep(5000);
            }
        }
        // Fall back to creating our own terminal if none became active.
        if (!vscode.window.activeTerminal) {
            vscode.window.createTerminal('Manual');
            await sleep(5000);
        }
        if (!vscode.window.activeTerminal) {
            vscode.window.showErrorMessage('No Terminal in Bootstrap Extension');
        }
        await vscode.window.activeTerminal.sendText(command, true);
        // Delete the command file so the harness knows it was consumed.
        fs.unlinkSync(filePath);
    });
    vscode.commands.registerCommand('smoketest.updateSettings', async () => {
        const filePath = path.join(__dirname, '..', 'settingsToUpdate.txt');
        try {
            const setting = getSettingsToUpdateRemove(filePath);
            const configTarget =
                setting.type === 'user'
                    ? vscode.ConfigurationTarget.Global
                    : setting.type === 'workspace'
                    ? vscode.ConfigurationTarget.Workspace
                    : vscode.ConfigurationTarget.WorkspaceFolder;
            if (configTarget === vscode.ConfigurationTarget.WorkspaceFolder && !setting.workspaceFolder) {
                vscode.window.showErrorMessage('Workspace Folder not defined for udpate/remove of settings');
                throw new Error('Workspace Folder not defined');
            }
            const resource = setting.workspaceFolder ? vscode.Uri.file(setting.workspaceFolder) : undefined;
            // BUGFIX: `setting.remove` is an array of setting names; `for...in`
            // iterated the indices ('0', '1', ...) rather than the names,
            // so the wrong configuration sections were updated.
            for (const settingToRemove of setting.remove || []) {
                const parentSection = settingToRemove.split('.')[0];
                const childSection = settingToRemove
                    .split('.')
                    .filter((_, i) => i > 0)
                    .join('.');
                const settings = vscode.workspace.getConfiguration(parentSection, resource);
                await settings.update(childSection, undefined, configTarget);
            }
            // `setting.update` is a map of setting name -> value; iterate its keys.
            for (const settingToAddUpdate of Object.keys(setting.update || {})) {
                const parentSection = settingToAddUpdate.split('.')[0];
                const childSection = settingToAddUpdate
                    .split('.')
                    .filter((_, i) => i > 0)
                    .join('.');
                const settings = vscode.workspace.getConfiguration(parentSection, resource);
                await settings.update(childSection, setting.update[settingToAddUpdate], configTarget);
            }
            fs.unlinkSync(filePath);
        } catch (ex) {
            // Surface failures to the test harness via an error file.
            fs.appendFileSync(path.join(__dirname, '..', 'settingsToUpdate_error.txt'), util.format(ex));
        }
    });
    vscode.commands.registerCommand('smoketest.openFile', async () => {
        const file = fs
            .readFileSync(path.join(__dirname, '..', 'commands.txt'))
            .toString()
            .trim();
        const doc = await vscode.workspace.openTextDocument(file);
        await vscode.window.showTextDocument(doc);
    });
    // Custom command to stop debug sessions.
    // Basically we need a way to stop any existing debug sessions.
    // Using the vsc command, as we can invoke it even if a debugger isn't running.
    // We can use the command `Debug: Stop` from the command palette only if a debug session is active.
    // Using this approach we can send a command regardless, easy.
    vscode.commands.registerCommand('smoketest.stopDebuggingPython', async () => {
        try {
            await vscode.commands.executeCommand('workbench.action.debug.stop');
        } catch {
            // Do nothing.
        }
    });
}
/**
* @typedef {Object} SettingsToUpdate - creates a new type named 'SpecialType'
* @property {'user' | 'workspace' | 'workspaceFolder'} [type] - Type.
* @property {?string} workspaceFolder - Workspace Folder
* @property {Object.<string, object>} update - Settings to update.
* @property {Array<string>} remove - Skip format checks.
*/
/**
*
*
* @param {*} filePath
* @return {SettingsToUpdate} Settings to update/remove.
*/
function getSettingsToUpdateRemove(filePath) {
return JSON.parse(
fs
.readFileSync(filePath)
.toString()
.trim()
);
}
exports.activate = activate;
// Nothing to dispose manually here: everything created in `activate` was
// registered via `context.subscriptions`.
function deactivate() {
    // Intentionally a no-op.
}
exports.deactivate = deactivate;

Просмотреть файл

@ -0,0 +1,54 @@
{
"name": "smoketest",
"publisher": "ms-python",
"displayName": "smokeTestPython",
"description": "Bootstrap for Python Smoke Tests",
"version": "0.0.1",
"license": "MIT",
"homepage": "https://github.com/Microsoft/vscode-python",
"repository": {
"type": "git",
"url": "https://github.com/Microsoft/vscode-python"
},
"bugs": {
"url": "https://github.com/Microsoft/vscode-python/issues"
},
"qna": "https://stackoverflow.com/questions/tagged/visual-studio-code+python",
"engines": {
"vscode": "^1.32.0"
},
"categories": [
"Other"
],
"activationEvents": [
"*"
],
"main": "./extension",
"contributes": {
"commands": [
{
"command": "smoketest.activatePython",
"title": "Activate Python Extension"
},
{
"command": "smoketest.stopDebuggingPython",
"title": "Stop Debugging Python"
},
{
"command": "smoketest.runInTerminal",
"title": "Smoke: Run Command In Terminal"
},
{
"command": "smoketest.updateSettings",
"title": "Smoke: Update Settings"
},
{
"command": "smoketest.viewLanguageServerOutput",
"title": "Smoke: Show Language Server Output Panel"
}
]
},
"scripts": {
"build": "vsce package --out ../bootstrap.vsix"
}
}

Просмотреть файл

@ -0,0 +1,19 @@
import sys

# Smoke-test fixture: prints the interpreter path so the UI tests can verify
# which Python environment was used to run the file.
# NOTE(review): the diff rendering stripped the original indentation; structure
# restored here per Python syntax. Fixture layout may be position-sensitive for
# UI tests (line/column assertions) -- confirm before relying on line numbers.
print(sys.executable)


class MyClass:
    """Simple fixture class exercised by the UI (smoke) tests."""

    def __init__(self):
        # Fixed attribute values the tests can assert against.
        self.name = "Don"
        self.age = 123

    def say_something(self):
        # Prints both attributes, then returns a sentinel string.
        print(self.age)
        print(self.name)
        return "ok"


x = MyClass()
print(x.say_something())
print(x.age)
print(x.name)

Просмотреть файл

@ -0,0 +1,3 @@
# Smoke-test fixture: defines and prints a single variable.
# NOTE(review): presumably used by editor/intellisense UI tests that look for
# this identifier -- confirm against the feature files before renaming.
some_variable_name = 1
print(some_variable_name)

Просмотреть файл

@ -0,0 +1,42 @@
# Tags
* @wip
* Used only for debugging purposes.
* When debugging in VSC, only features/scenarios with @wip tag will be executed.
* @skip
* Used to skip a feature/scenario.
* @https://github.com/xxx/yyy.git
* Can only be used at a feature level.
* The contents of the above repo will be used as the contents of the workspace folder.
* Note: assume the tag is `@https://github.com/DonJayamanne/pyvscSmokeTesting.git`
* The above repo is cloned directly into the workspace.
* If however the tag is `@https://github.com/DonJayamanne/pyvscSmokeTesting/tests`
* Now, the contents of the workspace is the `tests` directory in the above repo.
* This allows us to have a single repo with files/tests for more than just one feature/scenario.
* Else we'd need to have multiple repos for each feature/scenario.
* @code:<path relative to uitests folder>
* Can only be used at a feature level.
* The contents of the above folder will be used as the contents of the workspace folder.
* Note: assume the tag is `@code:some folder/pythonFiles`
* The contents of the above folder is copied recursively into the workspace.
* This allows us to have a single repo with files/tests for more than just one feature/scenario.
* Else we'd need to have multiple repos for each feature/scenario.
* @mac, @win, @linux
* Used to ensure a particular feature/scenario runs only in mac, win or linux respectively.
* @python2, @python3, @python3.5, @python3.6, @python3.7
* Used to ensure a particular feature/scenario runs only in specific version of Python, respectively.
* @insider
* Used to ensure a particular feature/scenario runs only in VS Code Insiders.
* @stable
* Used to ensure a particular feature/scenario runs only in VS Code Stable.
* @smoke
* All smoke test related functionality.
* @test
* All testing related functionality.
* @debug
* All debugger related functionality.
* @terminal
* All terminal related functionality.
* @terminal.venv
* Related to virtual environments (`python -m venv`)
* @terminal.pipenv
* Related to pipenv environments (`pipenv shell`)

Просмотреть файл

@ -0,0 +1,39 @@
# @ds @smoke
# @https://github.com/DonJayamanne/vscode-python-uitests/datascience
# Feature: Data Science
# Scenario: Can display an image and print text into the interactive window
# Given the package "jupyter" is installed
# And a file named "log.log" does not exist
# # Increase font size for text detection.
# And the workspace setting "editor.fontSize" has the value 15
# And the file "smoke.py" is open
# When I wait for the Python extension to activate
# # Code will display an image and print stuff into interactive window.
# When I select the command "Python: Run All Cells"
# # Wait for Interactive Window to open
# And I wait for 10 seconds
# # Close the file, to close it, first set focus to it by opening it again.
# And I open the file "smoke.py"
# And I select the command "View: Revert and Close Editor"
# And I select the command "View: Close Panel"
# # Wait for 2 minutes for Jupyter to start
# Then a file named "log.log" will be created within 120 seconds
# # This is the content of the image rendered in the interactive window.
# # And the text "VSCODEROCKS" is displayed in the Interactive Window
# # # This is the content printed by a python script.
# # And the text "DATASCIENCEROCKS" is displayed in the Interactive Window
# Scenario: Workspace directory is used as cwd for untitled python files
# Given the package "jupyter" is installed
# And a file named "log.log" does not exist
# When I wait for the Python extension to activate
# When I create an untitled Python file with the following content
# """
# open("log.log", "w").write("Hello")
# """
# # Code will display an image and print stuff into interactive window.
# When I select the command "Python: Run All Cells"
# # Wait for Interactive Window to open
# And I wait for 10 seconds
# # Wait for 2 minutes for Jupyter to start
# Then a file named "log.log" will be created within 120 seconds

Просмотреть файл

@ -0,0 +1,86 @@
@debugging
Feature: Debugging
Scenario: Debugging a python file without creating a launch configuration (with delays in user code)
"""
Ensure we can debug a python file (the code in the python file is slow).
I.e. it will not run to completion immediately.
"""
Given the file ".vscode/launch.json" does not exist
And a file named "simple sample.py" is created with the following content
"""
# Add a minor delay for tests to confirm debugger has started
import time
time.sleep(2)
print("Hello World")
open("log.log", "w").write("Hello")
"""
When I wait for the Python extension to activate
When I open the file "simple sample.py"
When I select the command "Debug: Start Debugging"
Then the Python Debug Configuration picker is displayed
When I select the debug configuration "Python File"
# This is when VSC displays the toolbar, (but actual debugger may not have started just yet).
Then the debugger starts
# Starting the debugger takes a while, (open terminal, activate it, etc)
And the debugger will stop within 20 seconds
And a file named "log.log" will be created
Scenario: Confirm Run without debugging without creating a launch configuration works
"""
Ensure we can run a python file without debugging.
I.e. it will not run to completion immediately.
In the past when the debugger would run to completion quickly, the debugger wouldn't work correctly.
Here, we need to ensure that no notifications/messages are displayed at the end of the debug session.
(in the past VSC would display error messages).
"""
Given the file ".vscode/launch.json" does not exist
And a file named "simple sample.py" is created with the following content
"""
print("Hello World")
open("log.log", "w").write("Hello")
"""
When I wait for the Python extension to activate
# Wait for some time for all messages to be displayed, then hide all of them.
Then wait for 10 seconds
And select the command "Notifications: Clear All Notifications"
When I open the file "simple sample.py"
And I select the command "Debug: Start Without Debugging"
# This is when VSC displays the toolbar, (but actual debugger may not have started just yet).
Then the debugger starts
# Starting the debugger takes a while, (open terminal, activate it, etc)
And the debugger will stop within 5 seconds
And a file named "log.log" will be created
And take a screenshot
And no error notifications are displayed
@smoke
Scenario: Debugging a python file without creating a launch configuration (hello world)
"""
In the past when the debugger would run to completion quickly, the debugger wouldn't work correctly.
Here, we need to ensure that no notifications/messages are displayed at the end of the debug session.
(in the past VSC would display error messages).
"""
Given the file ".vscode/launch.json" does not exist
And a file named "simple sample.py" is created with the following content
"""
print("Hello World")
open("log.log", "w").write("Hello")
"""
When I wait for the Python extension to activate
# Wait for some time for all messages to be displayed, then hide all of them.
Then wait for 10 seconds
And select the command "Notifications: Clear All Notifications"
When I open the file "simple sample.py"
And I select the command "Debug: Start Debugging"
Then the Python Debug Configuration picker is displayed
When I select the debug configuration "Python File"
# This is when VSC displays the toolbar, (but actual debugger may not have started just yet).
Then the debugger starts
# Starting the debugger takes a while, (open terminal, activate it, etc)
And the debugger will stop within 20 seconds
And a file named "log.log" will be created
Then take a screenshot
And no error notifications are displayed

Просмотреть файл

@ -0,0 +1,65 @@
@debugging
Feature: Debugging
Scenario: Debugging a python file with breakpoints
Given a file named ".vscode/launch.json" is created with the following content
"""
{
"version": "0.2.0",
"configurations": [
{
"name": "Python: Current File",
"type": "python",
"request": "launch",
"program": "${workspaceFolder}/simple sample.py",
"console": "integratedTerminal"
}
]
}
"""
And a file named "simple sample.py" is created with the following content
"""
open("log.log", "w").write("Hello")
"""
When I wait for the Python extension to activate
And I open the file "simple sample.py"
And I add a breakpoint to line 1
And I select the command "View: Close All Editors"
And I select the command "Debug: Start Debugging"
Then the debugger starts
And the debugger pauses
And the file "simple sample.py" is opened
And the cursor is on line 1
And the current stack frame is at line 1 in "simple sample.py"
When I select the command "Debug: Continue"
Then the debugger stops
Scenario: Debugging a python file without breakpoints
Given a file named ".vscode/launch.json" is created with the following content
"""
{
"version": "0.2.0",
"configurations": [
{
"name": "Python: Current File",
"type": "python",
"request": "launch",
"program": "${workspaceFolder}/simple sample.py",
"console": "integratedTerminal",
"stopOnEntry": true
}
]
}
"""
And a file named "simple sample.py" is created with the following content
"""
open("log.log", "w").write("Hello")
"""
When I wait for the Python extension to activate
And I select the command "Debug: Start Debugging"
Then the debugger starts
And the debugger pauses
And the file "simple sample.py" is opened
And the cursor is on line 1
And the current stack frame is at line 1 in "simple sample.py"
When I select the command "Debug: Continue"
Then the debugger stops

Просмотреть файл

@ -0,0 +1,91 @@
# Feature: Debugger
# @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Debug Python File with launch.json
# Given the file "main.py" is open
# When stopOnEntry is false in launch.json
# When I add a breakpoint to line 6
# When I select the command "View: Toggle Integrated Terminal"
# When I press "F5"
# Then debugger starts
# Then take a screenshot
# When I open the debug console
# Then the text "Application launched successfully" is displayed in the debug console
# Then take a screenshot
# Then number of variables in variable window is 1
# When I select the command "Debug: Step Over"
# Then stack frame for file "main.py" is displayed
# When I select the command "Debug: Step Over"
# Then stack frame for file "main.py" and line 6 is displayed
# When I select the command "Debug: Step Over"
# Then stack frame for file "main.py" and line 5 is displayed
# When I select the command "Debug: Step Over"
# When I select the command "Debug: Step Into"
# Then stack frame for file "wow.py" and line 7 is displayed
# When I select the command "Debug: Continue"
# Then debugger stops
# @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Debug Python File without launch.json
# Given the file "main.py" is open
# Given the file ".vscode/launch.json" does not exist
# When I add a breakpoint to line 6
# When I select the command "View: Toggle Integrated Terminal"
# When I press "F5"
# Then debugger starts
# Then take a screenshot
# When I open the debug console
# Then the text "Application launched successfully" is displayed in the debug console
# Then take a screenshot
# Then number of variables in variable window is 1
# When I select the command "Debug: Step Over"
# Then stack frame for file "main.py" is displayed
# When I select the command "Debug: Step Over"
# Then stack frame for file "main.py" and line 6 is displayed
# When I select the command "Debug: Step Over"
# Then stack frame for file "main.py" and line 5 is displayed
# When I select the command "Debug: Step Over"
# When I select the command "Debug: Step Into"
# Then stack frame for file "wow.py" and line 7 is displayed
# When I select the command "Debug: Continue"
# Then debugger stops
# @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Debug Python File and stop on entry
# Given the file "debugAndStopOnEntry.py" is open
# When stopOnEntry is true in launch.json
# When I open the file "debugAndStopOnEntry.py"
# When I press "F5"
# Then debugger starts
# Then take a screenshot
# Then stack frame for file "debugAndStopOnEntry.py" and line 3 is displayed
# When I select the command "Debug: Continue"
# Then debugger stops
# @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Debug Python File without breakpoints
# Given the file "debugWithoutBreakpoints.py" is open
# When I press "F5"
# Then debugger starts
# Then take a screenshot
# Then debugger stops
# When I select the command "View: Debug Console"
# Then the text "Debugging completed" is displayed in the debug console
# Then take a screenshot
# @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Run Python File without debugging
# Given the file "runWithoutDebugging.py" is open
# When I select the command "Debug: Start Without Debugging"
# Then debugger stops
# When I select the command "View: Debug Console"
# Then the text "Ran without debugging" is displayed in the debug console
# Then take a screenshot
# @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/vscode/smokeTests/debugSimple
# Scenario: Run Python File without debugging
# Given the file "runWithoutDebugging.py" is open
# When I select the command "Debug: Start Without Debugging"
# Then debugger stops
# When I select the command "View: Debug Console"
# Then the text "Ran without debugging and no launch.json" is displayed in the debug console
# Then take a screenshot

Просмотреть файл

@ -0,0 +1,89 @@
# @terminal
# Feature: Environment Files
# Background: Activated Extension
# Given the Python extension has been activated
# Given a file named ".env" is created with the following content
# """
# MY_FILE_NAME=log1.log
# """
# Given a file named ".env2" is created with the following content
# """
# MY_FILE_NAME=log2.log
# """
# Given a file named "simple sample.py" is created with the following content
# """
# import os
# file_name = os.environ.get("MY_FILE_NAME", "other.log")
# with open(file_name, "w") as fp:
# fp.write("Hello")
# """
# And a file named "log1.log" does not exist
# And a file named "log2.log" does not exist
# Scenario: Environment variable defined in default environment file is used by debugger
# Given a file named ".vscode/launch.json" is created with the following content
# """
# {
# "version": "0.2.0",
# "configurations": [
# {
# "name": "Python: Current File",
# "type": "python",
# "request": "launch",
# "program": "${workspaceFolder}/simple sample.py",
# "console": "integratedTerminal"
# }
# ]
# }
# """
# When I open the file "simple sample.py"
# And I select the command "Debug: Start Debugging"
# Then the debugger starts
# And the debugger stops
# And a file named "log1.log" will be created
# Scenario: Environment variable defined in envFile of launch.json is used by debugger
# Given a file named ".vscode/launch.json" is created with the following content
# """
# {
# "version": "0.2.0",
# "configurations": [
# {
# "name": "Python: Current File",
# "type": "python",
# "request": "launch",
# "program": "${workspaceFolder}/simple sample.py",
# "console": "integratedTerminal",
# "envFile": "${workspaceFolder}/.env2"
# }
# ]
# }
# """
# When I open the file "simple sample.py"
# And I select the command "Debug: Start Debugging"
# Then the debugger starts
# And the debugger stops
# And a file named "log2.log" will be created
# Scenario: Environment variable defined in envFile of settings.json is used by debugger
# Given the workspace setting "python.envFile" has the value "${workspaceFolder}/.env2"
# Given a file named ".vscode/launch.json" is created with the following content
# """
# {
# "version": "0.2.0",
# "configurations": [
# {
# "name": "Python: Current File",
# "type": "python",
# "request": "launch",
# "program": "${workspaceFolder}/simple sample.py",
# "console": "integratedTerminal"
# }
# ]
# }
# """
# When I open the file "simple sample.py"
# And I select the command "Debug: Start Debugging"
# Then the debugger starts
# And the debugger stops
# And a file named "log2.log" will be created

Просмотреть файл

@ -0,0 +1,32 @@
@terminal
Feature: Interpreter
@mac @python2
Scenario: Display message when selecting default Mac 2.7 Interpreter
Given the Python extension has been activated
When I select the Python Interpreter containing the text "/usr/bin/python"
Then a message containing the text "You have selected the macOS system install of Python" is displayed
Scenario: Opening VS Code for the first time will display tip about selecting interpreter
Given VS Code is opened for the first time
When the Python extension has activated
Then a message containing the text "Tip: you can change the Python interpreter used by the Python extension by clicking" is displayed
Scenario: Re-opening VS Code will display tip about selecting interpreter
Given VS Code is opened for the first time
When the Python extension has activated
Then a message containing the text "Tip: you can change the Python interpreter used by the Python extension by clicking" is displayed
When I reload VS Code
And the Python extension has activated
Then a message containing the text "Tip: you can change the Python interpreter used by the Python extension by clicking" is displayed
Scenario: Re-opening VS Code will not display tip about selecting interpreter after clicking the 'Got it' button
Given VS Code is opened for the first time
Then the Python extension has activated
Then a message containing the text "Tip: you can change the Python interpreter used by the Python extension by clicking" is displayed
When I click the "Got it!" button for the message with the text "Tip: you can change the Python interpreter used by the Python extension by clicking"
# Wait for state information to get persisted (of the fact that we closed this message).
# I.e. wait a while before we close VS Code.
And wait for 5 seconds
And I reload VS Code
And the Python extension has activated
Then a message containing the text "Tip: you can change the Python interpreter used by the Python extension by clicking" is not displayed

Просмотреть файл

@ -0,0 +1,88 @@
# @terminal @terminal.conda
# @skip
# @https://github.com/DonJayamanne/vscode-python-uitests/terminal/execution
# Feature: Terminal (conda)
# Scenario: Interpreter display name contains the name of the environment and conda
# Given the user setting "python.pythonPath" does not exist
# And a conda environment is created with the name "helloworld"
# Then take a screenshot
# When I wait for 20 seconds
# Then take a screenshot
# # Wait for some time for the new conda environment to get discovered.
# When I reload VSC
# Then take a screenshot
# When I send the command "conda env list" to the terminal
# When I wait for 5 seconds
# Then take a screenshot
# When I wait for 20 seconds
# Then take a screenshot
# When I select the command "Python: Select Interpreter"
# When I wait for 3 seconds
# Then take a screenshot
# When I reload VSC
# And I wait for 30 seconds
# And I select the Python Interpreter containing the name "helloworld"
# Then take a screenshot
# Then the python interpreter displayed in the the status bar contains the value "conda" in the display name
# And the python interpreter displayed in the the status bar contains the value "helloworld" in the display name
# And the workspace setting "python.pythonPath" exists
# # @preserve.workspace
# # Scenario: Pipenv is auto selected
# # Given the workspace setting "python.pythonPath" does not exist
# # And the user setting "python.pythonPath" does not exist
# # When I reload VSC
# # Then the python interpreter displayed in the the status bar contains the value "pipenv" in the display name
# # And the python interpreter displayed in the the status bar contains the value "workspace folder" in the display name
# # And the workspace setting "python.pythonPath" exists
# # @preserve.workspace
# # Scenario: Pipenv is not auto selected (if we already have a local interpreter selected)
# # Given a generic Python Interpreter is selected
# # When I reload VSC
# # Then the python interpreter displayed in the the status bar does not contain the value "pipenv" in the display name
# # And the python interpreter displayed in the the status bar does not contain the value "workspace folder" in the display name
# # And the workspace setting "python.pythonPath" exists
# # @preserve.workspace
# # Scenario: Pipenv is not auto selected (if we have a global interpreter selected)
# # Given the workspace setting "python.pythonPath" does not exist
# # And the user setting "python.pythonPath" exists
# # When I reload VSC
# # Then open the file "settings.json"
# # Then the python interpreter displayed in the the status bar does not contain the value "pipenv" in the display name
# # And the python interpreter displayed in the the status bar does not contain the value "workspace folder" in the display name
# # @preserve.workspace
# # Scenario: Environment is not activated in the Terminal
# # Given the workspace setting "python.pythonPath" does not exist
# # And the user setting "python.pythonPath" does not exist
# # When I reload VSC
# # Then the python interpreter displayed in the the status bar contains the value "pipenv" in the display name
# # And the python interpreter displayed in the the status bar contains the value "workspace folder" in the display name
# # Given the file "write_pyPath_in_log.py" is open
# # And a file named "log.log" does not exist
# # And the workspace setting "python.terminal.activateEnvironment" is disabled
# # And a terminal is opened
# # When I send the command "python run_in_terminal.py" to the terminal
# # Then a file named "log.log" is created
# # And open the file "log.log"
# # And the file "log.log" does not contain the value "workspace_folder"
# # And take a screenshot
# # @preserve.workspace
# # Scenario: Environment is activated in the Terminal
# # Given the workspace setting "python.pythonPath" does not exist
# # And the user setting "python.pythonPath" does not exist
# # When I reload VSC
# # Then the python interpreter displayed in the the status bar contains the value "pipenv" in the display name
# # And the python interpreter displayed in the the status bar contains the value "workspace folder" in the display name
# # Given the file "run_in_terminal.py" is open
# # And a file named "log.log" does not exist
# # And the workspace setting "python.terminal.activateEnvironment" is enabled
# # And a terminal is opened
# # When I send the command "python run_in_terminal.py" to the terminal
# # Then a file named "log.log" is created
# # And open the file "log.log"
# # And the file "log.log" contains the value "workspace_folder"
# # And take a screenshot

Просмотреть файл

@ -0,0 +1,19 @@
# Feature: Interpreters
# @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Validate selection of interpreter
# Given some random interpreter is selected
# When I select a python interpreter
# Then interpreter information in status bar has refreshed
# @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Validate selection of interpreter when nothing was selected
# Given there is no python path in settings.json
# When I select a python interpreter
# Then interpreter information in status bar has refreshed
# # @pipenv
# # Scenario: Auto select existing pipenv
# # Given the setting 'python.pythonPath' does not exist
# # When I reload vscode
# # Then settings.json will automatically be updated with pythonPath
# # Then the selected interpreter contains the name 'pipenv'

Просмотреть файл

@ -0,0 +1,71 @@
# @terminal @terminal.pipenv
# @https://github.com/DonJayamanne/vscode-python-uitests/terminal/execution
# Feature: Terminal (pipenv)
# Scenario: Interpreter display name contains the name of the current workspace folder and pipenv
# Given the user setting "python.pythonPath" does not exist
# And a pipenv environment is created
# When I reload VSC
# And I select the Python Interpreter containing the name "workspace folder pipenv"
# Then the python interpreter displayed in the the status bar contains the value "pipenv" in the display name
# And the python interpreter displayed in the the status bar contains the value "workspace folder" in the display name
# And the workspace setting "python.pythonPath" exists
# @preserve.workspace
# Scenario: Pipenv is auto selected
# Given the workspace setting "python.pythonPath" does not exist
# And the user setting "python.pythonPath" does not exist
# When I reload VSC
# Then the python interpreter displayed in the the status bar contains the value "pipenv" in the display name
# And the python interpreter displayed in the the status bar contains the value "workspace folder" in the display name
# And the workspace setting "python.pythonPath" exists
# @preserve.workspace
# Scenario: Pipenv is not auto selected (if we already have a local interpreter selected)
# Given a generic Python Interpreter is selected
# When I reload VSC
# Then the python interpreter displayed in the the status bar does not contain the value "pipenv" in the display name
# And the python interpreter displayed in the the status bar does not contain the value "workspace folder" in the display name
# And the workspace setting "python.pythonPath" exists
# @preserve.workspace
# Scenario: Pipenv is not auto selected (if we have a global interpreter selected)
# Given the workspace setting "python.pythonPath" does not exist
# And the user setting "python.pythonPath" exists
# When I reload VSC
# Then open the file "settings.json"
# Then the python interpreter displayed in the the status bar does not contain the value "pipenv" in the display name
# And the python interpreter displayed in the the status bar does not contain the value "workspace folder" in the display name
# @preserve.workspace
# Scenario: Environment is not activated in the Terminal
# Given the workspace setting "python.pythonPath" does not exist
# And the user setting "python.pythonPath" does not exist
# When I reload VSC
# Then the python interpreter displayed in the the status bar contains the value "pipenv" in the display name
# And the python interpreter displayed in the the status bar contains the value "workspace folder" in the display name
# Given the file "write_pyPath_in_log.py" is open
# And a file named "log.log" does not exist
# And the workspace setting "python.terminal.activateEnvironment" is disabled
# And a terminal is opened
# When I send the command "python run_in_terminal.py" to the terminal
# Then a file named "log.log" is created
# And open the file "log.log"
# And the file "log.log" does not contain the value "workspace_folder"
# And take a screenshot
# @preserve.workspace
# Scenario: Environment is activated in the Terminal
# Given the workspace setting "python.pythonPath" does not exist
# And the user setting "python.pythonPath" does not exist
# When I reload VSC
# Then the python interpreter displayed in the the status bar contains the value "pipenv" in the display name
# And the python interpreter displayed in the the status bar contains the value "workspace folder" in the display name
# Given the file "run_in_terminal.py" is open
# And a file named "log.log" does not exist
# And the workspace setting "python.terminal.activateEnvironment" is enabled
# And a terminal is opened
# When I send the command "python run_in_terminal.py" to the terminal
# Then a file named "log.log" is created
# And open the file "log.log"
# And the file "log.log" contains the value "workspace_folder"
# And take a screenshot

Просмотреть файл

@ -0,0 +1,37 @@
@terminal
Feature: Statusbar
@smoke
Scenario: Interpreter is displayed in the statusbar when a python file is opened
When I create a new file
And I change the language of the file to "Python"
And the Python extension has activated
Then the python the status bar contains the text "Python"
@status
Scenario: Interpreter is displayed in the statusbar when the extension is activated
When the Python extension has activated
Then the python the status bar contains the text "Python"
@python2
Scenario: Can select a Python 2.7 interpreter and the statusbar will be updated accordingly
Given the Python extension has been activated
When I select the Python Interpreter containing the text "2.7"
Then the python the status bar contains the text "2.7"
And the python the status bar does not contain the text "3."
@python3
Scenario: Can select a Python 3. interpreter and the statusbar will be updated accordingly
Given the Python extension has been activated
When I select the Python Interpreter containing the text "3."
Then the python the status bar contains the text "3."
And the python the status bar does not contain the text "2.7"
@python2 @python3
Scenario: Can switch between 2.7 and 3.* interpreters and the statusbar will be updated accordingly
Given the Python extension has been activated
When I select the Python Interpreter containing the text "2.7"
Then the python the status bar contains the text "2.7"
And the python the status bar does not contain the text "3."
When I select the Python Interpreter containing the text "3."
Then the python the status bar contains the text "3."
And the python the status bar does not contain the text "2.7"

Просмотреть файл

@ -0,0 +1,57 @@
@terminal
Feature: Terminal
Background: Activated Extension
Then take a screenshot
Given the Python extension has been activated
Then take a screenshot
@smoke
Scenario: Execute File in Terminal
# Use folders and paths with spaces.
Given a file named "run in terminal.py" is created with the following content
"""
open('log.log', 'w').write('Hello World')
"""
And a file named "log.log" does not exist
Then take a screenshot
When I open the file "run in terminal.py"
Then take a screenshot
Then wait for 1 second
When I select the command "Python: Run Python File in Terminal"
Then take a screenshot
# Wait for some time, as it could take a while for terminal to get activated.
# Slow on windows.
Then a file named "log.log" is created within 30 seconds
Scenario: Execute File within a sub directory in Terminal
# Use folders and paths with spaces.
Given a file named "hello word/run in terminal.py" is created with the following content
"""
open('log.log', 'w').write('Hello World')
"""
And a file named "hello word/log.log" does not exist
When I open the file "run in terminal.py"
And I select the command "Python: Run Python File in Terminal"
# Wait for some time, as it could take a while for terminal to get activated.
# Slow on windows.
Then a file named "log.log" is created within 20 seconds
Scenario: Execute Selection in Terminal
# Use folders and paths with spaces.
Given a file named "run in terminal.py" is created with the following content
"""
open('log1.log', 'w').write('Hello World')
open('log2.log', 'w').write('Hello World')
"""
And a file named "log1.log" does not exist
And a file named "log2.log" does not exist
When I open the file "run in terminal.py"
And I go to line 1
And I select the command "Python: Run Selection/Line in Python Terminal"
Then a file named "log1.log" is created within 20 seconds
And take a screenshot
When I go to line 2
And I select the command "Python: Run Selection/Line in Python Terminal"
# Wait for some time, as it could take a while for terminal to get activated.
# Slow on windows.
Then a file named "log2.log" is created within 20 seconds

Просмотреть файл

@ -0,0 +1,63 @@
# @terminal @terminal.venv @python3
# @https://github.com/DonJayamanne/vscode-python-uitests/terminal/execution
# Feature: Terminal (venv)
# Scenario: Interpreter display name contains the name of the venv folder
# Given a venv with the name "venv 1" is created
# When In Mac, I update the workspace setting "python.pythonPath" with the value "venv 1/bin/python"
# When In Linux, I update the workspace setting "python.pythonPath" with the value "venv 1/bin/python"
# When In Windows, I update the workspace setting "python.pythonPath" with the value "venv 1/Scripts/python.exe"
# Then the python interpreter displayed in the the status bar contains the value "venv 1" in the display name
# @preserve.workspace
# Scenario: Venv is auto selected
# Given the workspace setting "python.pythonPath" does not exist
# And the user setting "python.pythonPath" does not exist
# Then the python interpreter displayed in the the status bar does not contain the value "venv 1" in the display name
# When I reload VSC
# Then the python interpreter displayed in the the status bar contains the value "venv 1" in the display name
# @preserve.workspace
# Scenario: Venv is not auto selected (if we already have a local interpreter selected)
# Given a generic Python Interpreter is selected
# And the user setting "python.pythonPath" does not exist
# Then the python interpreter displayed in the the status bar does not contain the value "venv 1" in the display name
# When I reload VSC
# Then the python interpreter displayed in the the status bar does not contain the value "venv 1" in the display name
# @preserve.workspace
# Scenario: Venv is not auto selected (if we have a global interpreter selected)
# Given the workspace setting "python.pythonPath" does not exist
# And the user setting "python.pythonPath" exists
# Then the python interpreter displayed in the the status bar does not contain the value "venv 1" in the display name
# When I reload VSC
# Then the python interpreter displayed in the the status bar does not contain the value "venv 1" in the display name
# @preserve.workspace
# Scenario: Environment is not activated in the Terminal
# When In Mac, I update the workspace setting "python.pythonPath" with the value "venv 1/bin/python"
# When In Linux, I update the workspace setting "python.pythonPath" with the value "venv 1/bin/python"
# When In Windows, I update the workspace setting "python.pythonPath" with the value "venv 1/Scripts/python.exe"
# Given the file "write_pyPath_in_log.py" is open
# And a file named "log.log" does not exist
# And the workspace setting "python.terminal.activateEnvironment" is disabled
# And a terminal is opened
# When I send the command "python write_pyPath_in_log.py" to the terminal
# Then a file named "log.log" is created
# And open the file "log.log"
# And the file "log.log" does not contain the value "env 1"
# And take a screenshot
# @preserve.workspace
# Scenario: Environment is activated in the Terminal
# When In Mac, I update the workspace setting "python.pythonPath" with the value "venv 1/bin/python"
# When In Linux, I update the workspace setting "python.pythonPath" with the value "venv 1/bin/python"
# When In Windows, I update the workspace setting "python.pythonPath" with the value "venv 1/Scripts/python.exe"
# Given the file "write_pyPath_in_log.py" is open
# And a file named "log.log" does not exist
# And the workspace setting "python.terminal.activateEnvironment" is enabled
# And a terminal is opened
# When I send the command "python write_pyPath_in_log.py" to the terminal
# Then a file named "log.log" is created
# And open the file "log.log"
# And the file "log.log" contains the value "env 1"
# And take a screenshot

Просмотреть файл

@ -0,0 +1,70 @@
@ls
@code:code/languageServer/basic
Feature: Language Server
@smoke
Scenario Outline: Check output of 'Python' output panel when starting VS Code with Jedi <jedi_enable>d
When I <jedi_enable> the workspace setting "python.jediEnabled"
And I wait for the Python extension to activate
And I select the command "Python: Show Output"
Then the text "<first_text_in_output_panel>" will be displayed in the output panel within <time_to_activate> seconds
Examples:
| jedi_enable | time_to_activate | first_text_in_output_panel |
| enable | 10 | Jedi Python language engine |
| disable | 120 | Microsoft Python language server |
Scenario Outline: Language Server is downloaded with http.proxyStrictSSL setting <enabled_disabled>
When I open VS Code for the first time
And I disable the workspace setting "python.jediEnabled"
And the user setting "http.proxyStrictSSL" is <enabled_disabled>
And I wait for the Python extension to activate
And I select the command "Python: Show Output"
Then the text "Microsoft Python language server" will be displayed in the output panel within 120 seconds
When I select the command "Python: Show Language Server Output"
Then the text "<protocol_to_look_for>" will be displayed in the output panel within 120 seconds
Then the text "Initializing for" will be displayed in the output panel within 120 seconds
Examples:
| enabled_disabled | protocol_to_look_for |
| enabled | https:// |
| disabled | http:// |
@autoretry
Scenario Outline: Navigate to definition of a variable when extension has already been activated with Jedi <jedi_enable>d
When I reload VS Code
And I <jedi_enable> the workspace setting "python.jediEnabled"
And I wait for the Python extension to activate
And I select the command "Python: Show Output"
Then the text "<first_text_in_output_panel>" will be displayed in the output panel within <time_to_activate> seconds
# Because LS is slow.
And wait for <time_to_activate> seconds
When I open the file "my_sample.py"
And I go to line 3, column 10
# Wait for intellisense to kick in (sometimes slow in jedi & ls)
And I wait for 10 seconds
When I select the command "Go to Definition"
Then the cursor is on line 1
Examples:
| jedi_enable | time_to_activate | first_text_in_output_panel |
| enable | 10 | Jedi Python language engine |
| disable | 120 | Microsoft Python language server |
# @autoretry @wip
# Scenario Outline: Navigate to definition of a variable after opening a file with Jedi <jedi_enabled>
# Given the workspace setting "python.jediEnabled" is <jedi_enabled>
# When I open the file "my_sample.py"
# And I wait for the Python extension to activate
# And I select the command "Python: Show Output"
# Then the text "<first_text_in_ooutput_panel>" will be displayed in the output panel within <time_to_activate> seconds
# And the text "<second_text_in_output_panel>" will be displayed in the output panel within <time_to_activate> seconds
# When I go to line 3, column 10
# # Wait for intellisense to kick in (sometimes slow in jedi & ls)
# And I wait for 10 seconds
# And I select the command "Go to Definition"
# Then the cursor is on line 1
# Examples:
# | jedi_enabled | time_to_activate | first_text_in_ooutput_panel | second_text_in_output_panel |
# | enabled | 10 | Jedi Python language engine | Jedi Python language engine |
# | disabled | 120 | Microsoft Python language server | Initializing for |

Просмотреть файл

@ -0,0 +1,64 @@
@ls
@code:code/languageServer/basic
Feature: Language Server
Scenario Outline: When <reload_or_start_vs_for_first_time> with Jedi <jedi_enable>d then output contains <text_in_output_panel>
When <reload_or_start_vs_for_first_time>
And I <jedi_enable> the workspace setting "python.jediEnabled"
And I wait for the Python extension to activate
And I select the command "<output_panel_command>"
Then the text "<text_in_output_panel>" will be displayed in the output panel within <time_to_activate> seconds
Examples:
| jedi_enable | reload_or_start_vs_for_first_time | time_to_activate | text_in_output_panel | output_panel_command |
| enable | I open VS Code for the first time | 5 | Jedi Python language engine | Python: Show Output |
| enable | I reload VS Code | 5 | Jedi Python language engine | Python: Show Output |
| disable | I open VS Code for the first time | 120 | Microsoft Python language server | Python: Show Output |
| disable | I open VS Code for the first time | 120 | Downloading | Python: Show Language Server Output |
| disable | I reload VS Code | 120 | Microsoft Python language server | Python: Show Output |
@autoretry
Scenario Outline: When <reload_or_start_vs_for_first_time> with Jedi <jedi_enable>d then navigate to definition of a variable
When <reload_or_start_vs_for_first_time>
And I <jedi_enable> the workspace setting "python.jediEnabled"
And I wait for the Python extension to activate
And I select the command "<output_panel_command>"
Then the text "<text_in_output_panel>" will be displayed in the output panel within <time_to_activate> seconds
# Because LS is slow.
And wait for <time_to_activate> seconds
When I open the file "my_sample.py"
And I go to line 3, column 10
# Wait for intellisense to kick in (sometimes slow in jedi & ls)
And I wait for 5 seconds
And I select the command "Go to Definition"
Then the cursor is on line 1
Examples:
| jedi_enable | reload_or_start_vs_for_first_time | time_to_activate | text_in_output_panel | output_panel_command |
| enable | I open VS Code for the first time | 5 | Jedi Python language engine | Python: Show Output |
| enable | I reload VS Code | 5 | Jedi Python language engine | Python: Show Output |
| disable | I open VS Code for the first time | 120 | Microsoft Python language server | Python: Show Output |
| disable | I open VS Code for the first time | 120 | Downloading | Python: Show Language Server Output |
| disable | I reload VS Code | 120 | Microsoft Python language server | Python: Show Output |
@autoretry
Scenario Outline: When I open VS Code for the first time with Jedi <jedi_enable>d, open a file then navigate to definition of a variable
When I open VS Code for the first time
And I <jedi_enable> the workspace setting "python.jediEnabled"
And I select the command "Python: Show Output"
And I wait for the Python extension to activate
And I open the file "my_sample.py"
And I select the command "<output_panel_command>"
Then the text "<text_in_output_panel>" will be displayed in the output panel within <time_to_activate> seconds
# Because LS is slow.
And wait for <time_to_activate> seconds
When I go to line 3, column 10
# Wait for intellisense to kick in (sometimes slow in jedi & ls)
And I wait for 5 seconds
And I select the command "Go to Definition"
Then the cursor is on line 1
Examples:
| jedi_enable | time_to_activate | text_in_output_panel | output_panel_command |
| enable | 5 | Jedi Python language engine | Python: Show Output |
| disable | 120 | Microsoft Python language server | Python: Show Output |
| disable | 120 | Downloading | Python: Show Language Server Output |

Просмотреть файл

@ -0,0 +1,79 @@
@ls
@code:code/languageServer/basic
Feature: Language Server
@autoretry
Scenario Outline: When <reload_or_start_vs_for_first_time> with Jedi <jedi_enable>d then intellisense works
When <reload_or_start_vs_for_first_time>
And I <jedi_enable> the workspace setting "python.jediEnabled"
And I wait for the Python extension to activate
And I select the command "Python: Show Output"
Then the text "<first_text_in_output_panel>" will be displayed in the output panel within <time_to_activate> seconds
And wait for <time_to_activate> seconds
# Get more real estate on UI (hide what we don't need).
And select the command "View: Close Panel"
When I open the file "intelli_sample.py"
# Wait for intellisense to kick in (sometimes slow in jedi & ls)
And I wait for <wait_time> seconds
And I go to line 3, column 13
And I press ctrl+space
Then auto completion list contains the item "excepthook"
And auto completion list contains the item "exec_prefix"
And auto completion list contains the item "executable"
When I go to line 11, column 21
And I press ctrl+space
Then auto completion list contains the item "age"
When I go to line 12, column 21
And I press ctrl+space
Then auto completion list contains the item "name"
When I go to line 17, column 10
And I press ctrl+space
Then auto completion list contains the item "say_something"
When I go to line 18, column 10
And I press ctrl+space
Then auto completion list contains the item "age"
When I go to line 19, column 10
And I press ctrl+space
Then auto completion list contains the item "name"
When I go to line 17, column 24
And I press .
Then auto completion list contains the item "capitalize"
And auto completion list contains the item "count"
Examples:
| jedi_enable | reload_or_start_vs_for_first_time | time_to_activate | first_text_in_output_panel | wait_time |
| enable | I open VS Code for the first time | 5 | Jedi Python language engine | 5 |
| enable | I reload VS Code | 5 | Jedi Python language engine | 5 |
| disable | I open VS Code for the first time | 120 | Microsoft Python language server | 5 |
| disable | I reload VS Code | 120 | Microsoft Python language server | 5 |
@autoretry
Scenario Outline: When <reload_or_start_vs_for_first_time> with Jedi <jedi_enable>d then intellisense works for untitled files
When <reload_or_start_vs_for_first_time>
And I <jedi_enable> the workspace setting "python.jediEnabled"
And I wait for the Python extension to activate
And I select the command "Python: Show Output"
Then the text "<first_text_in_output_panel>" will be displayed in the output panel within <time_to_activate> seconds
And wait for <time_to_activate> seconds
# Get more real estate on UI (hide what we don't need).
And select the command "View: Close Panel"
When I create a new file with the following content
"""
import sys
print(sys.executable)
"""
And I change the language of the file to "Python"
# Wait for intellisense to kick in (sometimes slow in jedi & ls)
And I wait for <wait_time> seconds
And I go to line 3, column 13
And I press ctrl+space
Then auto completion list contains the item "excepthook"
And auto completion list contains the item "exec_prefix"
And auto completion list contains the item "executable"
Examples:
| jedi_enable | reload_or_start_vs_for_first_time | time_to_activate | first_text_in_output_panel | wait_time |
| enable | I open VS Code for the first time | 5 | Jedi Python language engine | 5 |
| enable | I reload VS Code | 5 | Jedi Python language engine | 5 |
| disable | I open VS Code for the first time | 120 | Microsoft Python language server | 5 |
| disable | I reload VS Code | 120 | Microsoft Python language server | 5 |

Просмотреть файл

@ -0,0 +1,47 @@
@ls
Feature: Language Server
Background: Unresolved imports
Given a file named "sample.py" is created with the following content
"""
import requests
"""
Given the workspace setting "python.jediEnabled" is disabled
Given the package "requests" is not installed
When I reload VS Code
And I open the file "sample.py"
And I wait for the Python extension to activate
And I select the command "Python: Show Output"
Then the text "Microsoft Python language server" will be displayed in the output panel within 120 seconds
And wait for 120 seconds
When I select the command "View: Focus Problems (Errors, Warnings, Infos)"
Then there is at least one problem in the problems panel
And there is a problem with the file named "sample.py"
And there is a problem with the message "unresolved import 'requests'"
Scenario: Display problem about unresolved imports
"""
Just execute the background and ensure problems are displayed.
"""
Then do nothing
Scenario: There should be no problem related to unresolved imports when reloading VSC
When I install the package "requests"
When I reload VS Code
# Wait for some time for LS to detect this.
# And I wait for 5 seconds
And I open the file "sample.py"
And I wait for the Python extension to activate
And I select the command "Python: Show Output"
Then the text "Microsoft Python language server" will be displayed in the output panel within 120 seconds
And wait for 120 seconds
When I select the command "View: Focus Problems (Errors, Warnings, Infos)"
# Ensure we are not too eager; possibly the LS hasn't analyzed yet.
And I wait for 10 seconds
Then there are no problems in the problems panel
@skip
Scenario: Unresolved import message should go away when package is installed
When I install the package "requests"
# Wait for some time for LS to detect this new package.
And I wait for 10 seconds
Then there are no problems in the problems panel

Просмотреть файл

@ -0,0 +1,48 @@
# Feature: Linters
# We will need to reload LS, its slow at picking missing modules when they are installed/uninstalled.
# @ls @pylint @linter @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Language server displays warnings
# Given the module "pylint" is not installed
# Given the module "requests" is not installed
# Given the setting "python.linting.enabled" is not enabled
# Given the setting "python.linting.pylintEnabled" is not enabled
# Given the file "pylint errors.py" is open
# Given the problems panel is open
# Then wait for 1 second
# Then there is 1 warning in the problems panel
# Then there is a warning with the message "unresolved import 'requests'" in the problems panel
# @ls @pylint @linter @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Pylint displays problems
# Given the module "pylint" is installed
# Given the module "requests" is not installed
# Given the setting "python.linting.enabled" is enabled
# Given the setting "python.linting.pylintEnabled" is enabled
# Given the file "pylint errors.py" is open
# Given the problems panel is open
# # Then wait for 1 second
# Then there are 2 errors in the problems panel
# Then log message "taking screenshot"
# Then take a screenshot
# # Then log message "done taking screenshot"
# # Then there is 1 warning in the problems panel
# Then there is an error with the message "Unable to import 'requests'" in the problems panel
# # Then there is an error with the message "Unable to import 'numpy'" in the problems panel
# # Then there is a warning with the message "unresolved import 'requests'" in the problems panel
# Then take a screenshot
# # @ls @pylint @linter @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# # Scenario: Pylint + LS problems vanish upon installing module
# # Given the module "pylint" is installed
# # Given the module "requests" is not installed
# # Given the file "pylint errors.py" is open
# # Given the setting "python.linting.enabled" is enabled
# # Given the setting "python.linting.pylintEnabled" is enabled
# # Given the problems panel is open
# # Then there are 2 errors in the problems panel
# # Then take a screenshot
# # When I close all editors
# # When I install the module "requests"
# # When I open the file "pylint errors.py"
# # Then there are 1 errors in the problems panel
# # Then there is an error with the message "Unable to import 'numpy'" in the problems panel

Просмотреть файл

@ -0,0 +1,51 @@
# Feature: Terminal
# @terminal @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Activation of environment in terminal
# Given "python.terminal.activateEnvironment:true" in settings.json
# Then environment will auto-activate in the terminal
# @terminal @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Non-activation of environment in terminal
# Given "python.terminal.activateEnvironment:false" in settings.json
# Then environment will not auto-activate in the terminal
# @terminal @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Python file will run in activated terminal
# Given "python.terminal.activateEnvironment:true" in settings.json
# Then a python file run in the terminal will run in the activated environment
# @terminal @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Sending lines from editor to an auto-activated terminal
# Given "python.terminal.activateEnvironment:true" in settings.json
# Given the file "runSelection.py" is open
# Then log message "23241324"
# When I set cursor to line 1 of file "runSelection.py"
# When I select the command "Python: Run Selection/Line in Python Terminal"
# Then the text "Hello World!" will be displayed in the terminal
# Then the text "And hello again!" will not be displayed in the terminal
# When I press "down"
# When I select the command "Python: Run Selection/Line in Python Terminal"
# Then the text "And hello again!" will be displayed in the terminal
# @terminal @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Sending lines from editor to terminal
# Given "python.terminal.activateEnvironment:false" in settings.json
# Given the file "runSelection.py" is open
# When I set cursor to line 1 of file "runSelection.py"
# When I select the command "Python: Run Selection/Line in Python Terminal"
# Then the text "Hello World!" will be displayed in the terminal
# Then the text "And hello again!" will not be displayed in the terminal
# When I press "down"
# When I select the command "Python: Run Selection/Line in Python Terminal"
# Then the text "And hello again!" will be displayed in the terminal
# @terminal @debug @WorkspaceFolder:/Users/donjayamanne/Desktop/Development/PythonStuff/smoke_tests/env_0-virtualenv
# Scenario: Sending multiple lines from editor to terminal
# Given "python.terminal.activateEnvironment:false" in settings.json
# Given the file "runSelection.py" is open
# When I set cursor to line 1 of file "runSelection.py"
# When I press "shift+down"
# When I press "shift+down"
# When I select the command "Python: Run Selection/Line in Python Terminal"
# Then the text "Hello World!" and "And hello again!" will be displayed in the terminal

Просмотреть файл

@ -0,0 +1,38 @@
@test
Feature: Testing
Scenario: Discover will display prompt to configure when not configured
Given the file ".vscode/settings.json" does not exist
When the Python extension has activated
And I select the command "Python: Discover Tests"
Then a message containing the text "No test framework configured" is displayed
Scenario Outline: Discover will prompt to install <package>
Given the package "<package>" is not installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When the Python extension has activated
And I select the command "Python: Discover Tests"
Then a message containing the text "<message>" is displayed
Examples:
| package | setting_to_enable | message |
| pytest | pytestEnabled | pytest is not installed |
| nose | nosetestsEnabled | nosetest is not installed |
Scenario Outline: Discover will display prompt indicating there are no tests (<package>)
Given a file named ".vscode/settings.json" is created with the following content
"""
{
"python.testing.<args_setting>": <args>,
"python.testing.<setting_to_enable>": true
}
"""
And the package "<package>" is installed
When the Python extension has activated
And I select the command "Python: Discover Tests"
Then a message containing the text "No tests discovered" is displayed
Examples:
| package | setting_to_enable | args_setting | args |
| unittest | unittestEnabled | unittestArgs | ["-v","-s",".","-p","*test*.py"] |
| pytest | pytestEnabled | pytestArgs | ["."] |
| nose | nosetestsEnabled | nosetestArgs | ["."] |

Просмотреть файл

@ -0,0 +1,82 @@
@testing
@https://github.com/DonJayamanne/pyvscSmokeTesting/testing
Feature: Test Explorer
Background: Activated Extension
Given a file named ".vscode/settings.json" is created with the following content
"""
{
"python.testing.unittestArgs": [
"-v",
"-s",
"./tests",
"-p",
"test_*.py"
],
"python.testing.unittestEnabled": false,
"python.testing.pytestArgs": [
"."
],
"python.testing.pytestEnabled": false,
"python.testing.nosetestArgs": [
"."
],
"python.testing.nosetestsEnabled": false
}
"""
Given the Python extension has been activated
Scenario Outline: Explorer icon will be displayed when tests are discovered (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
Examples:
| package | setting_to_enable |
| unittest | unittestEnabled |
| pytest | pytestEnabled |
| nose | nosetestsEnabled |
Scenario Outline: All expected items (nodes) are displayed in the test explorer (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
Then there are <node_count> nodes in the test explorer
Examples:
| package | setting_to_enable | node_count |
| unittest | unittestEnabled | 14 |
| pytest | pytestEnabled | 15 |
| nose | nosetestsEnabled | 14 |
Scenario Outline: When discovering tests, the nodes will have the progress icon and clicking stop will stop discovery (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
Then there are <node_count> nodes in the test explorer
# Now, add a delay for the discovery of the tests
# This way, we have enough time to test visibility of UI elements & the like.
Given a file named "tests/test_discovery_delay" is created with the following content
"""
10
"""
When I select the command "Python: Discover Tests"
Then all of the test tree nodes have a progress icon
And the stop icon is visible in the toolbar
When I stop discovering tests
Then the stop icon is not visible in the toolbar
Examples:
| package | setting_to_enable | node_count |
| unittest | unittestEnabled | 14 |
| pytest | pytestEnabled | 15 |
| nose | nosetestsEnabled | 14 |

Просмотреть файл

@ -0,0 +1,79 @@
@testing
@https://github.com/DonJayamanne/pyvscSmokeTesting/testing
Feature: Test Explorer (code nav)
Background: Activated Extension
Given a file named ".vscode/settings.json" is created with the following content
"""
{
"python.testing.unittestArgs": [
"-v",
"-s",
"./tests",
"-p",
"test_*.py"
],
"python.testing.unittestEnabled": false,
"python.testing.pytestArgs": [
"."
],
"python.testing.pytestEnabled": false,
"python.testing.nosetestArgs": [
"."
],
"python.testing.nosetestsEnabled": false
}
"""
Given the Python extension has been activated
Scenario Outline: When navigating to a test file, suite & test, then open the file and set the cursor at the right line (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
And I navigate to the code associated with the test node "<node_label>"
Then the file "<file>" is opened
And <optionally_check_line>
Examples:
| package | setting_to_enable | node_label | file | optionally_check_line |
| unittest | unittestEnabled | test_one.py | test_one.py | do nothing |
| unittest | unittestEnabled | test_one_first_suite | test_one.py | the cursor is on line 20 |
| unittest | unittestEnabled | test_three_first_suite | test_one.py | the cursor is on line 30 |
| unittest | unittestEnabled | test_two_first_suite | test_one.py | the cursor is on line 25 |
| pytest | pytestEnabled | test_one.py | test_one.py | do nothing |
| pytest | pytestEnabled | test_one_first_suite | test_one.py | the cursor is on line 20 |
| pytest | pytestEnabled | test_three_first_suite | test_one.py | the cursor is on line 30 |
| pytest | pytestEnabled | test_two_first_suite | test_one.py | the cursor is on line 25 |
| nose | nosetestsEnabled | tests/test_one.py | test_one.py | do nothing |
| nose | nosetestsEnabled | test_one_first_suite | test_one.py | the cursor is on line 20 |
| nose | nosetestsEnabled | test_three_first_suite | test_one.py | the cursor is on line 30 |
| nose | nosetestsEnabled | test_two_first_suite | test_one.py | the cursor is on line 25 |
Scenario Outline: When selecting a node, then open the file (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
When I click the test node with the label "<node_label>"
Then the file "<file>" is opened
Examples:
| package | setting_to_enable | node_label | file |
| unittest | unittestEnabled | TestFirstSuite | test_one.py |
| unittest | unittestEnabled | test_one_first_suite | test_one.py |
| unittest | unittestEnabled | test_three_first_suite | test_one.py |
| unittest | unittestEnabled | test_two_third_suite | test_two.py |
| pytest | pytestEnabled | TestFirstSuite | test_one.py |
| pytest | pytestEnabled | test_one_first_suite | test_one.py |
| pytest | pytestEnabled | test_three_first_suite | test_one.py |
| pytest | pytestEnabled | test_two_third_suite | test_two.py |
| nose | nosetestsEnabled | TestFirstSuite | test_one.py |
| nose | nosetestsEnabled | test_one_first_suite | test_one.py |
| nose | nosetestsEnabled | test_three_first_suite | test_one.py |
| nose | nosetestsEnabled | test_two_third_suite | test_two.py |

Просмотреть файл

@ -0,0 +1,133 @@
@testing
@https://github.com/DonJayamanne/pyvscSmokeTesting/testing
Feature: Test Explorer (debugging)
Background: Activated Extension
Given a file named ".vscode/settings.json" is created with the following content
"""
{
"python.testing.unittestArgs": [
"-v",
"-s",
"./tests",
"-p",
"test_*.py"
],
"python.testing.unittestEnabled": false,
"python.testing.pytestArgs": ["."],
"python.testing.pytestEnabled": false,
"python.testing.nosetestArgs": ["."],
"python.testing.nosetestsEnabled": false
}
"""
Given the Python extension has been activated
Scenario Outline: When debugging tests, the nodes will have the progress icon and clicking stop will stop the debugger (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
# The number entered in this file will be used in a `time.sleep(?)` statement.
# Resulting in delays in running the tests (delay is in the python code in the above repo).
Given a file named "tests/test_running_delay" is created with the following content
"""
30
"""
Then there are <node_count> nodes in the test explorer
And <node_count> nodes in the test explorer have a status of "Unknown"
When I debug the node "test_three_first_suite" from the test explorer
Then the debugger starts
When I select the command "Debug: Stop"
Then the debugger stops
Examples:
| package | setting_to_enable | node_count |
| unittest | unittestEnabled | 14 |
| pytest | pytestEnabled | 15 |
| nose | nosetestsEnabled | 14 |
Scenario Outline: When debugging tests, only the specific function will be debugged (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
When I add a breakpoint to line 33 in "test_one.py"
And I add a breakpoint to line 23 in "test_one.py"
And I debug the node "test_three_first_suite" from the test explorer
Then the debugger starts
And the debugger pauses
And the current stack frame is at line 33 in "test_one.py"
When I select the command "Debug: Continue"
Then the debugger stops
Examples:
| package | setting_to_enable |
| unittest | unittestEnabled |
| pytest | pytestEnabled |
| nose | nosetestsEnabled |
Scenario Outline: When debugging tests, only the specific suite will be debugged (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
When I add a breakpoint to line 33 in "test_one.py"
And I add a breakpoint to line 28 in "test_one.py"
And I add a breakpoint to line 23 in "test_one.py"
And I debug the node "TestFirstSuite" from the test explorer
Then the debugger starts
And the debugger pauses
And the current stack frame is at line 23 in "test_one.py"
When I select the command "Debug: Continue"
Then the debugger pauses
And the current stack frame is at line 33 in "test_one.py"
When I select the command "Debug: Continue"
Then the debugger pauses
And the current stack frame is at line 28 in "test_one.py"
When I select the command "Debug: Continue"
Then the debugger stops
Examples:
| package | setting_to_enable |
| unittest | unittestEnabled |
| pytest | pytestEnabled |
| nose | nosetestsEnabled |
Scenario Outline: When debugging tests, everything will be debugged (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
When I add a breakpoint to line 23 in "test_one.py"
And I add a breakpoint to line 38 in "test_one.py"
And I add a breakpoint to line 23 in "test_two.py"
And I select the command "Python: Debug All Tests"
Then the debugger starts
And the debugger pauses
And the current stack frame is at line 23 in "test_one.py"
When I select the command "Debug: Continue"
Then the debugger pauses
And the current stack frame is at line 38 in "test_one.py"
When I select the command "Debug: Continue"
Then the debugger pauses
And the current stack frame is at line 23 in "test_two.py"
When I select the command "Debug: Continue"
Then the debugger stops
Examples:
| package | setting_to_enable |
| unittest | unittestEnabled |
| pytest | pytestEnabled |
| nose | nosetestsEnabled |

Просмотреть файл

@ -0,0 +1,133 @@
@testing
@https://github.com/DonJayamanne/pyvscSmokeTesting/testing
Feature: Test Explorer - Re-run Failed Tests
Background: Activated Extension
Given a file named ".vscode/settings.json" is created with the following content
"""
{
"python.testing.unittestArgs": [
"-v",
"-s",
"./tests",
"-p",
"test_*.py"
],
"python.testing.unittestEnabled": false,
"python.testing.pytestArgs": ["."],
"python.testing.pytestEnabled": false,
"python.testing.nosetestArgs": ["."],
"python.testing.nosetestsEnabled": false
}
"""
Given the Python extension has been activated
Scenario Outline: We are able to re-run a failed tests (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
Then there are <node_count> nodes in the test explorer
And <node_count> nodes in the test explorer have a status of "Unknown"
Given a file named "tests/test_running_delay" is created with the following content
"""
0
"""
And a file named "tests/data.json" is created with the following content
"""
[1,-1,-1,4,5,6]
"""
When I select the command "Python: Run All Tests"
And I wait for tests to complete running
Then the node "<test_one_file_label>" in the test explorer has a status of "Fail"
And the node "TestFirstSuite" in the test explorer has a status of "Fail"
And the node "test_three_first_suite" in the test explorer has a status of "Fail"
And the node "test_two_first_suite" in the test explorer has a status of "Fail"
And the node "<test_two_file_label>" in the test explorer has a status of "Fail"
And the node "TestThirdSuite" in the test explorer has a status of "Fail"
And the node "test_three_third_suite" in the test explorer has a status of "Fail"
And the node "test_two_third_suite" in the test explorer has a status of "Fail"
And 6 nodes in the test explorer have a status of "Success"
And the run failed tests icon is visible in the toolbar
Given a file named "tests/test_running_delay" is created with the following content
"""
1
"""
And a file named "tests/data.json" is created with the following content
"""
[1,2,3,4,5,6]
"""
When I run failed tests
And I wait for tests to complete running
And I expand all of the nodes in the test explorer
Then <node_count> nodes in the test explorer have a status of "Success"
Examples:
| package | setting_to_enable | node_count | test_one_file_label | test_two_file_label |
| unittest | unittestEnabled | 14 | test_one.py | test_two.py |
| pytest | pytestEnabled | 15 | test_one.py | test_two.py |
| nose | nosetestsEnabled | 14 | tests/test_one.py | tests/test_two.py |
Scenario Outline: We are able to stop tests after re-running failed tests (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
Then there are <node_count> nodes in the test explorer
And <node_count> nodes in the test explorer have a status of "Unknown"
Given a file named "tests/test_running_delay" is created with the following content
"""
0
"""
And a file named "tests/data.json" is created with the following content
"""
[1,-1,-1,4,5,6]
"""
When I select the command "Python: Run All Tests"
And I wait for tests to complete running
Then the node "<test_one_file_label>" in the test explorer has a status of "Fail"
And the node "TestFirstSuite" in the test explorer has a status of "Fail"
And the node "test_three_first_suite" in the test explorer has a status of "Fail"
And the node "test_two_first_suite" in the test explorer has a status of "Fail"
And the node "<test_two_file_label>" in the test explorer has a status of "Fail"
And the node "TestThirdSuite" in the test explorer has a status of "Fail"
And the node "test_three_third_suite" in the test explorer has a status of "Fail"
And the node "test_two_third_suite" in the test explorer has a status of "Fail"
And 6 nodes in the test explorer have a status of "Success"
And the run failed tests icon is visible in the toolbar
Given a file named "tests/test_running_delay" is created with the following content
"""
100
"""
And a file named "tests/data.json" is created with the following content
"""
[1,2,3,4,5,6]
"""
When I run failed tests
Then the stop icon is visible in the toolbar
And I expand all of the nodes in the test explorer
Then the node "TestFirstSuite" in the test explorer has a status of "Progress"
And the node "test_three_first_suite" in the test explorer has a status of "Progress"
And the node "test_two_first_suite" in the test explorer has a status of "Progress"
And the node "TestThirdSuite" in the test explorer has a status of "Progress"
And the node "test_three_third_suite" in the test explorer has a status of "Progress"
And the node "test_two_third_suite" in the test explorer has a status of "Progress"
And <failed_node_count> nodes in the test explorer have a status of "Progress"
When I stop running tests
And I wait for tests to complete running
Then the stop icon is not visible in the toolbar
And the node "test_three_first_suite" in the test explorer has a status of "Unknown"
And the node "test_two_first_suite" in the test explorer has a status of "Unknown"
And the node "test_three_third_suite" in the test explorer has a status of "Unknown"
And the node "test_two_third_suite" in the test explorer has a status of "Unknown"
Examples:
| package | setting_to_enable | node_count | failed_node_count | test_one_file_label | test_two_file_label |
| unittest | unittestEnabled | 14 | 6 | test_one.py | test_two.py |
| pytest | pytestEnabled | 15 | 6 | test_one.py | test_two.py |
| nose | nosetestsEnabled | 14 | 6 | tests/test_one.py | tests/test_two.py |

Просмотреть файл

@ -0,0 +1,46 @@
@testing
@https://github.com/DonJayamanne/pyvscSmokeTesting/testing
Feature: Test Explorer
Background: Activated Extension
Given a file named ".vscode/settings.json" is created with the following content
"""
{
"python.testing.unittestArgs": [
"-v",
"-s",
"./tests",
"-p",
"test_*.py"
],
"python.testing.unittestEnabled": false,
"python.testing.pytestArgs": ["."],
"python.testing.pytestEnabled": false,
"python.testing.nosetestArgs": ["."],
"python.testing.nosetestsEnabled": false
}
"""
Given the Python extension has been activated
Scenario Outline: When running tests, the nodes will have the progress icon and clicking stop will stop running (<package>)
Given the package "<package>" is installed
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
And the file "tests/test_running_delay" has the following content
"""
10
"""
When I select the command "Python: Run All Tests"
Then all of the test tree nodes have a progress icon
And the stop icon is visible in the toolbar
When I stop running tests
Then the stop icon is not visible in the toolbar
Examples:
| package | setting_to_enable |
| unittest | unittestEnabled |
| pytest | pytestEnabled |
| nose | nosetestsEnabled |

Просмотреть файл

@ -0,0 +1,87 @@
@testing
@https://github.com/DonJayamanne/pyvscSmokeTesting/testing
Feature: Test Explorer
Background: Activated Extension
Given a file named ".vscode/settings.json" is created with the following content
"""
{
"python.testing.unittestArgs": [
"-v",
"-s",
"./tests",
"-p",
"test_*.py"
],
"python.testing.unittestEnabled": false,
"python.testing.pytestArgs": ["."],
"python.testing.pytestEnabled": false,
"python.testing.nosetestArgs": ["."],
"python.testing.nosetestsEnabled": false
}
"""
Given the Python extension has been activated
Scenario Outline: When running tests, the nodes will have the progress icon and when completed will have a success status (<package>)
Given the package "<package>" is installed
And a file named "tests/test_running_delay" is created with the following content
"""
5
"""
And the workspace setting "python.testing.<setting_to_enable>" is enabled
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
Then there are <node_count> nodes in the test explorer
And <node_count> nodes in the test explorer have a status of "Unknown"
When I run the node "test_two_first_suite" from the test explorer
Then the stop icon is visible in the toolbar
And 1 node in the test explorer has a status of "Progress"
And the node "test_two_first_suite" in the test explorer has a status of "Progress"
When I wait for tests to complete running
Then the node "<test_one_file_label>" in the test explorer has a status of "Success"
And the node "TestFirstSuite" in the test explorer has a status of "Success"
And the node "test_two_first_suite" in the test explorer has a status of "Success"
And 11 nodes in the test explorer have a status of "Unknown"
Examples:
| package | setting_to_enable | node_count | test_one_file_label |
| unittest | unittestEnabled | 14 | test_one.py |
| pytest | pytestEnabled | 15 | test_one.py |
| nose | nosetestsEnabled | 14 | tests/test_one.py |
Scenario Outline: When running tests, the nodes will have the progress icon and when completed will have a error status (<package>)
Given the package "<package>" is installed
And a file named "tests/test_running_delay" is created with the following content
"""
5
"""
And a file named "tests/data.json" is created with the following content
"""
[1,2,-1,4,5,6]
"""
And the workspace setting "python.testing.<setting_to_enable>" is enabled
# Then I wait for 1000 seconds
When I select the command "Python: Discover Tests"
And I wait for test discovery to complete
Then the test explorer icon will be visible
When I select the command "View: Show Test"
And I expand all of the nodes in the test explorer
Then there are <node_count> nodes in the test explorer
And <node_count> nodes in the test explorer have a status of "Unknown"
When I run the node "test_three_first_suite" from the test explorer
Then the stop icon is visible in the toolbar
And 1 node in the test explorer has a status of "Progress"
And the node "test_three_first_suite" in the test explorer has a status of "Progress"
When I wait for tests to complete running
Then the node "<test_one_file_label>" in the test explorer has a status of "Fail"
And the node "TestFirstSuite" in the test explorer has a status of "Fail"
And the node "test_three_first_suite" in the test explorer has a status of "Fail"
And 11 nodes in the test explorer have a status of "Unknown"
Examples:
| package | setting_to_enable | node_count | test_one_file_label |
| unittest | unittestEnabled | 14 | test_one.py |
| pytest | pytestEnabled | 15 | test_one.py |
| nose | nosetestsEnabled | 14 | tests/test_one.py |

Просмотреть файл

@ -0,0 +1,146 @@
# @test
# @https://github.com/DonJayamanne/pyvscSmokeTesting.git/testing
# Feature: Test Explorer Discovering icons and stop discovery
# Scenario Outline: Can navigate to the source of a test file, and line of the suite & test function (<package>)
# Given the workspace setting "python.testing.<setting_to_enable>" is enabled
# And the package "<package>" is installed
# When I reload VS Code
# And the Python extension has been activated
# And I select the command "View: Close All Editors"
# And I select the command "Python: Discover Tests"
# And I wait for test discovery to complete
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then log the message "1"
# When I navigate to the code associated with the test node "test_one.py"
# Then log the message "2"
# Then the file "test_one.py" is opened
# When I navigate to the code associated with the test node "test_one_first_suite"
# Then the file "test_one.py" is opened
# And the cursor is on line 20
# When I navigate to the code associated with the test node "test_three_first_suite"
# Then the file "test_one.py" is opened
# And the cursor is on line 30
# When I navigate to the code associated with the test node "test_two_first_suite"
# Then the file "test_one.py" is opened
# And the cursor is on line 25
# Examples:
# | package | setting_to_enable |
# | unittest | unittestEnabled |
# # | pytest | pytestEnabled |
# # | nose | nosetestsEnabled |
# # # Scenario: When navigating to a test file, suite & test, then open the file and set the cursor at the right line (pytest)
# # # Given the package "pytest" is installed
# # # And the workspace setting "python.testing.pytestEnabled" is enabled
# # # And the workspace setting "python.testing.unittestEnabled" is disabled
# # # And the workspace setting "python.testing.nosetestsEnabled" is disabled
# # # When I reload VSC
# # # When I select the command "Python: Discover Tests"
# # # Then the test explorer icon will be visible
# # # When I select the command "View: Show Test"
# # # And I expand all of the nodes in the test explorer
# # # When I navigate to the code associated with the test node "test_one.py"
# # # Then the file "test_one.py" is opened
# # # When I navigate to the code associated with the test node "test_one_first_suite"
# # # Then the file "test_one.py" is opened
# # # And the cursor is on line 20
# # # When I navigate to the code associated with the test node "test_three_first_suite"
# # # Then the file "test_one.py" is opened
# # # And the cursor is on line 30
# # # When I navigate to the code associated with the test node "test_two_first_suite"
# # # Then the file "test_one.py" is opened
# # # And the cursor is on line 25
# # # Scenario: When navigating to a test file, suite & test, then open the file and set the cursor at the right line (nose)
# # # Given the package "nose" is installed
# # # And the workspace setting "python.testing.pytestEnabled" is disabled
# # # And the workspace setting "python.testing.unittestEnabled" is disabled
# # # And the workspace setting "python.testing.nosetestsEnabled" is enabled
# # # When I reload VSC
# # # When I select the command "Python: Discover Tests"
# # # Then the test explorer icon will be visible
# # # When I select the command "View: Show Test"
# # # And I expand all of the nodes in the test explorer
# # # When I navigate to the code associated with the test node "tests/test_one.py"
# # # Then the file "test_one.py" is opened
# # # When I navigate to the code associated with the test node "test_one_first_suite"
# # # Then the file "test_one.py" is opened
# # # And the cursor is on line 20
# # # When I navigate to the code associated with the test node "test_three_first_suite"
# # # Then the file "test_one.py" is opened
# # # And the cursor is on line 30
# # # When I navigate to the code associated with the test node "test_two_first_suite"
# # # Then the file "test_one.py" is opened
# # # And the cursor is on line 25
# # # Scenario: When selecting a node, then open the file (unittest)
# # # Given the workspace setting "python.testing.pytestEnabled" is disabled
# # # And the workspace setting "python.testing.unittestEnabled" is enabled
# # # And the workspace setting "python.testing.nosetestsEnabled" is disabled
# # # And the command "View: Close All Editors" is selected
# # # When I reload VSC
# # # When I select the command "Python: Discover Tests"
# # # Then the test explorer icon will be visible
# # # When I select the command "View: Show Test"
# # # And I expand all of the nodes in the test explorer
# # # When I click the test node with the label "TestFirstSuite"
# # # Then the file "test_one.py" is opened
# # # Given the command "View: Close All Editors" is selected
# # # When I click the test node with the label "test_one_first_suite"
# # # Then the file "test_one.py" is opened
# # # Given the command "View: Close All Editors" is selected
# # # When I click the test node with the label "test_three_first_suite"
# # # Then the file "test_one.py" is opened
# # # Given the command "View: Close All Editors" is selected
# # # When I click the test node with the label "test_two_third_suite"
# # # Then the file "test_two.py" is opened
# # # Scenario: When selecting a node, then open the file (pytest)
# # # Given the package "pytest" is installed
# # # And the workspace setting "python.testing.pytestEnabled" is enabled
# # # And the workspace setting "python.testing.unittestEnabled" is disabled
# # # And the workspace setting "python.testing.nosetestsEnabled" is disabled
# # # When I reload VSC
# # # When I select the command "Python: Discover Tests"
# # # Then the test explorer icon will be visible
# # # When I select the command "View: Show Test"
# # # And I expand all of the nodes in the test explorer
# # # When I click the test node with the label "TestFirstSuite"
# # # Then the file "test_one.py" is opened
# # # Given the command "View: Close All Editors" is selected
# # # When I click the test node with the label "test_one_first_suite"
# # # Then the file "test_one.py" is opened
# # # Given the command "View: Close All Editors" is selected
# # # When I click the test node with the label "test_three_first_suite"
# # # Then the file "test_one.py" is opened
# # # Given the command "View: Close All Editors" is selected
# # # When I click the test node with the label "test_two_third_suite"
# # # Then the file "test_two.py" is opened
# # # Scenario: When selecting a node, then open the file (nose)
# # # Given the package "nose" is installed
# # # And the workspace setting "python.testing.pytestEnabled" is disabled
# # # And the workspace setting "python.testing.unittestEnabled" is disabled
# # # And the workspace setting "python.testing.nosetestsEnabled" is enabled
# # # When I reload VSC
# # # When I select the command "Python: Discover Tests"
# # # Then the test explorer icon will be visible
# # # When I select the command "View: Show Test"
# # # And I expand all of the nodes in the test explorer
# # # When I click the test node with the label "TestFirstSuite"
# # # Then the file "test_one.py" is opened
# # # Given the command "View: Close All Editors" is selected
# # # When I click the test node with the label "test_one_first_suite"
# # # Then the file "test_one.py" is opened
# # # Given the command "View: Close All Editors" is selected
# # # When I click the test node with the label "test_three_first_suite"
# # # Then the file "test_one.py" is opened
# # # Given the command "View: Close All Editors" is selected
# # # When I click the test node with the label "test_two_third_suite"
# # # Then the file "test_two.py" is opened

Просмотреть файл

@ -0,0 +1,278 @@
# @test
# @https://github.com/DonJayamanne/pyvscSmokeTesting.git
# Feature: Test Explorer Discovering icons and stop discovery
# Scenario: When debugging tests, the nodes will have the progress icon and clicking stop will stop the debugger (unittest)
# Given the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is enabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 5
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# And 14 nodes in the test explorer have a status of "Unknown"
# When I debug the node "test_three_first_suite" from the test explorer
# Then the debugger starts
# When I select the command "Debug: Stop"
# Then the debugger stops
# Scenario: When debugging tests, the nodes will have the progress icon and clicking stop will stop the debugger (pytest)
# Given the package "pytest" is installed
# And the workspace setting "python.testing.pytestEnabled" is enabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 5
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 15 nodes in the test explorer
# And 15 nodes in the test explorer have a status of "Unknown"
# When I debug the node "test_three_first_suite" from the test explorer
# Then the debugger starts
# When I select the command "Debug: Stop"
# Then the debugger stops
# Scenario: When debugging tests, the nodes will have the progress icon and clicking stop will stop the debugger (nose)
# Given the package "nose" is installed
# And the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is enabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 5
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# And 14 nodes in the test explorer have a status of "Unknown"
# When I debug the node "test_three_first_suite" from the test explorer
# Then the debugger starts
# When I select the command "Debug: Stop"
# Then the debugger stops
# Scenario: When debugging tests, only the specific function will be debugged (unittest)
# Given the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is enabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# When I add a breakpoint to line 33 in "test_one.py"
# And I add a breakpoint to line 23 in "test_one.py"
# And I debug the node "test_three_first_suite" from the test explorer
# Then the debugger starts
# And the debugger pauses
# And the current stack frame is at line 33 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger stops
# Scenario: When debugging tests, only the specific function will be debugged (pytest)
# Given the package "pytest" is installed
# And the workspace setting "python.testing.pytestEnabled" is enabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# When I add a breakpoint to line 33 in "test_one.py"
# And I add a breakpoint to line 23 in "test_one.py"
# And I debug the node "test_three_first_suite" from the test explorer
# Then the debugger starts
# And the debugger pauses
# And the current stack frame is at line 33 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger stops
# Scenario: When debugging tests, only the specific function will be debugged (nose)
# Given the package "nose" is installed
# And the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is enabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# When I add a breakpoint to line 33 in "test_one.py"
# And I add a breakpoint to line 23 in "test_one.py"
# And I debug the node "test_three_first_suite" from the test explorer
# Then the debugger starts
# And the debugger pauses
# And the current stack frame is at line 33 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger stops
# Scenario: When debugging tests, only the specific suite will be debugged (unittest)
# Given the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is enabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# When I add a breakpoint to line 33 in "test_one.py"
# And I add a breakpoint to line 28 in "test_one.py"
# And I add a breakpoint to line 23 in "test_one.py"
# And I debug the node "TestFirstSuite" from the test explorer
# Then the debugger starts
# And the debugger pauses
# And the current stack frame is at line 23 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 33 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 28 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger stops
# Scenario: When debugging tests, only the specific suite will be debugged (pytest)
# Given the package "pytest" is installed
# And the workspace setting "python.testing.pytestEnabled" is enabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# When I add a breakpoint to line 33 in "test_one.py"
# And I add a breakpoint to line 28 in "test_one.py"
# And I add a breakpoint to line 23 in "test_one.py"
# And I debug the node "TestFirstSuite" from the test explorer
# Then the debugger starts
# And the debugger pauses
# And the current stack frame is at line 23 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 33 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 28 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger stops
# Scenario: When debugging tests, only the specific suite will be debugged (nose)
# Given the package "nose" is installed
# And the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is enabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# When I add a breakpoint to line 33 in "test_one.py"
# And I add a breakpoint to line 28 in "test_one.py"
# And I add a breakpoint to line 23 in "test_one.py"
# And I debug the node "TestFirstSuite" from the test explorer
# Then the debugger starts
# And the debugger pauses
# And the current stack frame is at line 23 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 33 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 28 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger stops
# Scenario: When debugging tests, everything will be debugged (unittest)
# Given the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is enabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# When I add a breakpoint to line 23 in "test_one.py"
# And I add a breakpoint to line 38 in "test_one.py"
# And I add a breakpoint to line 23 in "test_two.py"
# And I select the command "Python: Debug All Tests"
# Then the debugger starts
# And the debugger pauses
# And the current stack frame is at line 23 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 38 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 23 in "test_two.py"
# When I select the command "Debug: Continue"
# Then the debugger stops
# Scenario: When debugging tests, everything will be debugged (pytest)
# Given the package "pytest" is installed
# And the workspace setting "python.testing.pytestEnabled" is enabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# When I add a breakpoint to line 23 in "test_one.py"
# And I add a breakpoint to line 38 in "test_one.py"
# And I add a breakpoint to line 23 in "test_two.py"
# And I select the command "Python: Debug All Tests"
# Then the debugger starts
# And the debugger pauses
# And the current stack frame is at line 23 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 38 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 23 in "test_two.py"
# When I select the command "Debug: Continue"
# Then the debugger stops
# Scenario: When debugging tests, everything will be debugged (nose)
# Given the package "nose" is installed
# And the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is enabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# When I add a breakpoint to line 23 in "test_one.py"
# And I add a breakpoint to line 38 in "test_one.py"
# And I add a breakpoint to line 23 in "test_two.py"
# And I select the command "Python: Debug All Tests"
# Then the debugger starts
# And the debugger pauses
# And the current stack frame is at line 23 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 38 in "test_one.py"
# When I select the command "Debug: Continue"
# Then the debugger pauses
# And the current stack frame is at line 23 in "test_two.py"
# When I select the command "Debug: Continue"
# Then the debugger stops

Просмотреть файл

@ -0,0 +1,38 @@
# @test
# @https://github.com/DonJayamanne/pyvscSmokeTesting.git
# Feature: Test Explorer
# Scenario: Explorer will be displayed when tests are discovered (unittest)
# Given the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is enabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# Scenario: Explorer will be displayed when tests are discovered (pytest)
# Given the package "pytest" is installed
# And the workspace setting "python.testing.pytestEnabled" is enabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 15 nodes in the test explorer
# Scenario: Explorer will be displayed when tests are discovered (nose)
# Given the package "nose" is installed
# And the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is enabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer

Просмотреть файл

@ -0,0 +1,71 @@
# @test
# @https://github.com/DonJayamanne/pyvscSmokeTesting.git
# Feature: Test Explorer Discovering icons and stop discovery
# Scenario: When discovering tests, the nodes will have the progress icon and clicking stop will stop discovery (unittest)
# Given the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is enabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# # Now, add a delay for the discovery of the tests
# Given a file named "tests/test_discovery_delay" is created with the following content
# """
# 10
# """
# When I select the command "Python: Discover Tests"
# And I wait for 1 second
# Then all of the test tree nodes have a progress icon
# And the stop icon is visible in the toolbar
# When I stop discovering tests
# Then the stop icon is not visible in the toolbar
# Scenario: When discovering tests, the nodes will have the progress icon and clicking stop will stop discovery (pytest)
# Given the package "pytest" is installed
# And the workspace setting "python.testing.pytestEnabled" is enabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 15 nodes in the test explorer
# # Now, add a delay for the discovery of the tests
# Given a file named "tests/test_discovery_delay" is created with the following content
# """
# 10
# """
# When I select the command "Python: Discover Tests"
# And I wait for 1 second
# Then all of the test tree nodes have a progress icon
# And the stop icon is visible in the toolbar
# When I stop discovering tests
# Then the stop icon is not visible in the toolbar
# Scenario: When discovering tests, the nodes will have the progress icon and clicking stop will stop discovery (nose)
# Given the package "nose" is installed
# And the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# # Now, add a delay for the discovery of the tests
# Given a file named "tests/test_discovery_delay" is created with the following content
# """
# 10
# """
# When I select the command "Python: Discover Tests"
# And I wait for 1 second
# Then all of the test tree nodes have a progress icon
# And the stop icon is visible in the toolbar
# When I stop discovering tests
# Then the stop icon is not visible in the toolbar

Просмотреть файл

@ -0,0 +1,308 @@
# @test
# @https://github.com/DonJayamanne/pyvscSmokeTesting.git
# Feature: Test Explorer - Re-run Failed Tests
# Scenario: We are able to re-run failed tests (unittest)
# Given the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is enabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 0
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,-1,-1,4,5,6]
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# And 14 nodes in the test explorer have a status of "Unknown"
# When I select the command "Python: Run All Tests"
# And I wait for tests to complete running
# Then the node "test_one.py" in the test explorer has a status of "Fail"
# And the node "TestFirstSuite" in the test explorer has a status of "Fail"
# And the node "test_three_first_suite" in the test explorer has a status of "Fail"
# And the node "test_two_first_suite" in the test explorer has a status of "Fail"
# And the node "test_two.py" in the test explorer has a status of "Fail"
# And the node "TestThirdSuite" in the test explorer has a status of "Fail"
# And the node "test_three_third_suite" in the test explorer has a status of "Fail"
# And the node "test_two_third_suite" in the test explorer has a status of "Fail"
# And 6 nodes in the test explorer have a status of "Success"
# And the run failed tests icon is visible in the toolbar
# Given a file named "tests/test_running_delay" is created with the following content
# """
# 1
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,2,3,4,5,6]
# """
# When I run failed tests
# And I wait for tests to complete running
# Then 14 nodes in the test explorer have a status of "Success"
# Scenario: We are able to re-run failed tests (pytest)
# Given the package "pytest" is installed
# And the workspace setting "python.testing.pytestEnabled" is enabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 0
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,-1,-1,4,5,6]
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 15 nodes in the test explorer
# And 15 nodes in the test explorer have a status of "Unknown"
# When I select the command "Python: Run All Tests"
# And I wait for tests to complete running
# Then the node "test_one.py" in the test explorer has a status of "Fail"
# And the node "TestFirstSuite" in the test explorer has a status of "Fail"
# And the node "test_three_first_suite" in the test explorer has a status of "Fail"
# And the node "test_two_first_suite" in the test explorer has a status of "Fail"
# And the node "test_two.py" in the test explorer has a status of "Fail"
# And the node "TestThirdSuite" in the test explorer has a status of "Fail"
# And the node "test_three_third_suite" in the test explorer has a status of "Fail"
# And the node "test_two_third_suite" in the test explorer has a status of "Fail"
# And 6 nodes in the test explorer have a status of "Success"
# And the run failed tests icon is visible in the toolbar
# Given a file named "tests/test_running_delay" is created with the following content
# """
# 1
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,2,3,4,5,6]
# """
# When I run failed tests
# And I wait for tests to complete running
# Then 15 nodes in the test explorer have a status of "Success"
# Scenario: We are able to re-run failed tests (nose)
# Given the package "nose" is installed
# And the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is enabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 0
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,-1,-1,4,5,6]
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# And 14 nodes in the test explorer have a status of "Unknown"
# When I select the command "Python: Run All Tests"
# And I wait for tests to complete running
# Then the node "tests/test_one.py" in the test explorer has a status of "Fail"
# And the node "TestFirstSuite" in the test explorer has a status of "Fail"
# And the node "test_three_first_suite" in the test explorer has a status of "Fail"
# And the node "test_two_first_suite" in the test explorer has a status of "Fail"
# And the node "tests/test_two.py" in the test explorer has a status of "Fail"
# And the node "TestThirdSuite" in the test explorer has a status of "Fail"
# And the node "test_three_third_suite" in the test explorer has a status of "Fail"
# And the node "test_two_third_suite" in the test explorer has a status of "Fail"
# And 6 nodes in the test explorer have a status of "Success"
# And the run failed tests icon is visible in the toolbar
# Given a file named "tests/test_running_delay" is created with the following content
# """
# 1
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,2,3,4,5,6]
# """
# When I run failed tests
# And I wait for tests to complete running
# Then 14 nodes in the test explorer have a status of "Success"
# Scenario: We are able to stop tests after re-running failed tests (unittest)
# Given the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is enabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 0
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,-1,-1,4,5,6]
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# And 14 nodes in the test explorer have a status of "Unknown"
# When I select the command "Python: Run All Tests"
# And I wait for tests to complete running
# Then the node "test_one.py" in the test explorer has a status of "Fail"
# And the node "TestFirstSuite" in the test explorer has a status of "Fail"
# And the node "test_three_first_suite" in the test explorer has a status of "Fail"
# And the node "test_two_first_suite" in the test explorer has a status of "Fail"
# And the node "test_two.py" in the test explorer has a status of "Fail"
# And the node "TestThirdSuite" in the test explorer has a status of "Fail"
# And the node "test_three_third_suite" in the test explorer has a status of "Fail"
# And the node "test_two_third_suite" in the test explorer has a status of "Fail"
# And 6 nodes in the test explorer have a status of "Success"
# And the run failed tests icon is visible in the toolbar
# Given a file named "tests/test_running_delay" is created with the following content
# """
# 100
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,2,3,4,5,6]
# """
# When I run failed tests
# And I wait for 1 seconds
# Then the stop icon is visible in the toolbar
# Then the node "TestFirstSuite" in the test explorer has a status of "Progress"
# And the node "test_three_first_suite" in the test explorer has a status of "Progress"
# And the node "test_two_first_suite" in the test explorer has a status of "Progress"
# And the node "TestThirdSuite" in the test explorer has a status of "Progress"
# And the node "test_three_third_suite" in the test explorer has a status of "Progress"
# And the node "test_two_third_suite" in the test explorer has a status of "Progress"
# And 6 nodes in the test explorer have a status of "Progress"
# When I stop running tests
# And I wait for tests to complete running
# Then the stop icon is not visible in the toolbar
# And the node "test_three_first_suite" in the test explorer has a status of "Unknown"
# And the node "test_two_first_suite" in the test explorer has a status of "Unknown"
# And the node "test_three_third_suite" in the test explorer has a status of "Unknown"
# And the node "test_two_third_suite" in the test explorer has a status of "Unknown"
# Scenario: We are able to stop tests after re-running failed tests (pytest)
# Given the package "pytest" is installed
# And the workspace setting "python.testing.pytestEnabled" is enabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 0
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,-1,-1,4,5,6]
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 15 nodes in the test explorer
# And 15 nodes in the test explorer have a status of "Unknown"
# When I select the command "Python: Run All Tests"
# And I wait for tests to complete running
# Then the node "test_one.py" in the test explorer has a status of "Fail"
# And the node "TestFirstSuite" in the test explorer has a status of "Fail"
# And the node "test_three_first_suite" in the test explorer has a status of "Fail"
# And the node "test_two_first_suite" in the test explorer has a status of "Fail"
# And the node "test_two.py" in the test explorer has a status of "Fail"
# And the node "TestThirdSuite" in the test explorer has a status of "Fail"
# And the node "test_three_third_suite" in the test explorer has a status of "Fail"
# And the node "test_two_third_suite" in the test explorer has a status of "Fail"
# And 6 nodes in the test explorer have a status of "Success"
# And the run failed tests icon is visible in the toolbar
# Given a file named "tests/test_running_delay" is created with the following content
# """
# 100
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,2,3,4,5,6]
# """
# When I run failed tests
# And I wait for 1 seconds
# Then the stop icon is visible in the toolbar
# Then the node "TestFirstSuite" in the test explorer has a status of "Progress"
# And the node "test_three_first_suite" in the test explorer has a status of "Progress"
# And the node "test_two_first_suite" in the test explorer has a status of "Progress"
# And the node "TestThirdSuite" in the test explorer has a status of "Progress"
# And the node "test_three_third_suite" in the test explorer has a status of "Progress"
# And the node "test_two_third_suite" in the test explorer has a status of "Progress"
# And 6 nodes in the test explorer have a status of "Progress"
# When I stop running tests
# And I wait for tests to complete running
# Then the stop icon is not visible in the toolbar
# And the node "test_three_first_suite" in the test explorer has a status of "Unknown"
# And the node "test_two_first_suite" in the test explorer has a status of "Unknown"
# And the node "test_three_third_suite" in the test explorer has a status of "Unknown"
# And the node "test_two_third_suite" in the test explorer has a status of "Unknown"
# Scenario: We are able to stop tests after re-running failed tests (nose)
# Given the package "nose" is installed
# And the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is enabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 0
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,-1,-1,4,5,6]
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# And 14 nodes in the test explorer have a status of "Unknown"
# When I select the command "Python: Run All Tests"
# And I wait for tests to complete running
# Then the node "tests/test_one.py" in the test explorer has a status of "Fail"
# And the node "TestFirstSuite" in the test explorer has a status of "Fail"
# And the node "test_three_first_suite" in the test explorer has a status of "Fail"
# And the node "test_two_first_suite" in the test explorer has a status of "Fail"
# And the node "tests/test_two.py" in the test explorer has a status of "Fail"
# And the node "TestThirdSuite" in the test explorer has a status of "Fail"
# And the node "test_three_third_suite" in the test explorer has a status of "Fail"
# And the node "test_two_third_suite" in the test explorer has a status of "Fail"
# And 6 nodes in the test explorer have a status of "Success"
# And the run failed tests icon is visible in the toolbar
# Given a file named "tests/test_running_delay" is created with the following content
# """
# 100
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,2,3,4,5,6]
# """
# When I run failed tests
# And I wait for 1 seconds
# Then the stop icon is visible in the toolbar
# Then the node "TestFirstSuite" in the test explorer has a status of "Progress"
# And the node "test_three_first_suite" in the test explorer has a status of "Progress"
# And the node "test_two_first_suite" in the test explorer has a status of "Progress"
# And the node "TestThirdSuite" in the test explorer has a status of "Progress"
# And the node "test_three_third_suite" in the test explorer has a status of "Progress"
# And the node "test_two_third_suite" in the test explorer has a status of "Progress"
# And 6 nodes in the test explorer have a status of "Progress"
# When I stop running tests
# And I wait for tests to complete running
# Then the stop icon is not visible in the toolbar
# And the node "test_three_first_suite" in the test explorer has a status of "Unknown"
# And the node "test_two_first_suite" in the test explorer has a status of "Unknown"
# And the node "test_three_third_suite" in the test explorer has a status of "Unknown"
# And the node "test_two_third_suite" in the test explorer has a status of "Unknown"

Просмотреть файл

@ -0,0 +1,64 @@
# @test
# @https://github.com/DonJayamanne/pyvscSmokeTesting.git
# Feature: Test Explorer Running icons and stop running
# Scenario: When running tests, the nodes will have the progress icon and clicking stop will stop running (unittest)
# Given the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is enabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# And the file "tests/test_running_delay" has the following content
# """
# 10
# """
# When I select the command "Python: Run All Tests"
# And I wait for 1 second
# Then all of the test tree nodes have a progress icon
# And the stop icon is visible in the toolbar
# When I stop running tests
# Then the stop icon is not visible in the toolbar
# Scenario: When running tests, the nodes will have the progress icon and clicking stop will stop running (pytest)
# Given the package "pytest" is installed
# And the workspace setting "python.testing.pytestEnabled" is enabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# And the file "tests/test_running_delay" has the following content
# """
# 10
# """
# When I select the command "Python: Run All Tests"
# And I wait for 1 second
# Then all of the test tree nodes have a progress icon
# And the stop icon is visible in the toolbar
# When I stop running tests
# Then the stop icon is not visible in the toolbar
# Scenario: When running tests, the nodes will have the progress icon and clicking stop will stop running (nose)
# Given the package "nose" is installed
# And the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is enabled
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# And the file "tests/test_running_delay" has the following content
# """
# 10
# """
# When I select the command "Python: Run All Tests"
# And I wait for 1 second
# Then all of the test tree nodes have a progress icon
# And the stop icon is visible in the toolbar
# When I stop running tests
# Then the stop icon is not visible in the toolbar

Просмотреть файл

@ -0,0 +1,176 @@
# @test
# @https://github.com/DonJayamanne/pyvscSmokeTesting.git
# Feature: Test Explorer Discovering icons and stop discovery
# Scenario: When running tests, the nodes will have the progress icon and when completed will have a success status (unittest)
# Given the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is enabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 5
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# And 14 nodes in the test explorer have a status of "Unknown"
# When I run the node "test_two_first_suite" from the test explorer
# And I wait for 1 seconds
# Then the stop icon is visible in the toolbar
# And 1 node in the test explorer has a status of "Progress"
# And the node "test_two_first_suite" in the test explorer has a status of "Progress"
# When I wait for tests to complete running
# Then the node "test_one.py" in the test explorer has a status of "Success"
# And the node "TestFirstSuite" in the test explorer has a status of "Success"
# And the node "test_two_first_suite" in the test explorer has a status of "Success"
# And 11 nodes in the test explorer have a status of "Unknown"
# Scenario: When running tests, the nodes will have the progress icon and when completed will have a success status (pytest)
# Given the package "pytest" is installed
# And the workspace setting "python.testing.pytestEnabled" is enabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 5
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 15 nodes in the test explorer
# And 15 nodes in the test explorer have a status of "Unknown"
# When I run the node "test_two_first_suite" from the test explorer
# And I wait for 1 seconds
# Then the stop icon is visible in the toolbar
# And 1 node in the test explorer has a status of "Progress"
# And the node "test_two_first_suite" in the test explorer has a status of "Progress"
# When I wait for tests to complete running
# Then the node "test_one.py" in the test explorer has a status of "Success"
# And the node "TestFirstSuite" in the test explorer has a status of "Success"
# And the node "test_two_first_suite" in the test explorer has a status of "Success"
# And 11 nodes in the test explorer have a status of "Unknown"
# Scenario: When running tests, the nodes will have the progress icon and when completed will have a success status (nose)
# Given the package "nose" is installed
# And the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is enabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 5
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# And 14 nodes in the test explorer have a status of "Unknown"
# When I run the node "test_two_first_suite" from the test explorer
# And I wait for 1 seconds
# Then the stop icon is visible in the toolbar
# And 1 node in the test explorer has a status of "Progress"
# And the node "test_two_first_suite" in the test explorer has a status of "Progress"
# When I wait for tests to complete running
# Then the node "tests/test_one.py" in the test explorer has a status of "Success"
# And the node "TestFirstSuite" in the test explorer has a status of "Success"
# And the node "test_two_first_suite" in the test explorer has a status of "Success"
# And 11 nodes in the test explorer have a status of "Unknown"
# Scenario: When running tests, the nodes will have the progress icon and when completed will have an error status (unittest)
# Given the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is enabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 5
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,2,-1,4,5,6]
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# And 14 nodes in the test explorer have a status of "Unknown"
# When I run the node "test_three_first_suite" from the test explorer
# And I wait for 1 seconds
# Then the stop icon is visible in the toolbar
# And 1 node in the test explorer has a status of "Progress"
# And the node "test_three_first_suite" in the test explorer has a status of "Progress"
# When I wait for tests to complete running
# Then the node "test_one.py" in the test explorer has a status of "Fail"
# And the node "TestFirstSuite" in the test explorer has a status of "Fail"
# And the node "test_three_first_suite" in the test explorer has a status of "Fail"
# And 11 nodes in the test explorer have a status of "Unknown"
# Scenario: When running tests, the nodes will have the progress icon and when completed will have an error status (pytest)
# Given the package "pytest" is installed
# And the workspace setting "python.testing.pytestEnabled" is enabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is disabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 5
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,2,-1,4,5,6]
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 15 nodes in the test explorer
# And 15 nodes in the test explorer have a status of "Unknown"
# When I run the node "test_three_first_suite" from the test explorer
# And I wait for 1 seconds
# Then the stop icon is visible in the toolbar
# And 1 node in the test explorer has a status of "Progress"
# And the node "test_three_first_suite" in the test explorer has a status of "Progress"
# When I wait for tests to complete running
# Then the node "test_one.py" in the test explorer has a status of "Fail"
# And the node "TestFirstSuite" in the test explorer has a status of "Fail"
# And the node "test_three_first_suite" in the test explorer has a status of "Fail"
# And 11 nodes in the test explorer have a status of "Unknown"
# Scenario: When running tests, the nodes will have the progress icon and when completed will have an error status (nose)
# Given the package "nose" is installed
# And the workspace setting "python.testing.pytestEnabled" is disabled
# And the workspace setting "python.testing.unittestEnabled" is disabled
# And the workspace setting "python.testing.nosetestsEnabled" is enabled
# And a file named "tests/test_running_delay" is created with the following content
# """
# 5
# """
# And a file named "tests/data.json" is created with the following content
# """
# [1,2,-1,4,5,6]
# """
# When I reload VSC
# When I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# When I select the command "View: Show Test"
# And I expand all of the nodes in the test explorer
# Then there are 14 nodes in the test explorer
# And 14 nodes in the test explorer have a status of "Unknown"
# When I run the node "test_three_first_suite" from the test explorer
# And I wait for 1 seconds
# Then the stop icon is visible in the toolbar
# And 1 node in the test explorer has a status of "Progress"
# And the node "test_three_first_suite" in the test explorer has a status of "Progress"
# When I wait for tests to complete running
# Then the node "tests/test_one.py" in the test explorer has a status of "Fail"
# And the node "TestFirstSuite" in the test explorer has a status of "Fail"
# And the node "test_three_first_suite" in the test explorer has a status of "Fail"
# And 11 nodes in the test explorer have a status of "Unknown"

Просмотреть файл

@ -0,0 +1,16 @@
# @test
# @https://github.com/DonJayamanne/pyvscSmokeTesting.git
# Feature: Testing
# Scenario Outline: Explorer will be displayed when tests are discovered (<package>)
# Given the setting "python.testing.<setting_to_enable>" is enabled
# And the package "<package>" is installed
# When I reload VS Code
# And the Python extension has been activated
# And I select the command "Python: Discover Tests"
# Then the test explorer icon will be visible
# Examples:
# | package | setting_to_enable |
# | unittest | unittestEnabled |
# | pytest | pytestEnabled |
# | nose | nosetestsEnabled |

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -0,0 +1,143 @@
# @testing @ci @debug @run
# @/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing
# Feature: Testing (run, debug, code lenses)
# Background: Set up tests
# Given the problems panel is open
# Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# Given the file "tests/test_discovery_delay" is updated with the value "0"
# Given the file "tests/test_running_delay" is updated with the value "0"
# Given the file "tests/data.json" is updated with the value "[1,2,3,4,5,6]"
# Given the file ".vscode/launch.json" does not exist
# When I select the command "Python: Discover Unit Tests"
# Then wait for 1 second
# Then wait for the test icon to appear within 5 seconds
# When I select the command "View: Show Test"
# Then take a screenshot
# Then the toolbar button with the text "Run All Unit Tests" is visible
# Then the toolbar button with the text "Debug All Unit Tests" is visible
# Then the toolbar button with the text "Discover Unit Tests" is visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is not visible
# Then the toolbar button with the text "Stop" is not visible
# Then expand test explorer tree
# @scenario1
# Scenario: Debug all tests and add breakpoints to two files
# Given the file "test_one.py" is open
# Given the file is scrolled to the top
# When I close all editors
# Given the file "test_one.py" is open
# When I add a breakpoint to line 22
# Given the file "test_two.py" is open
# Given the file is scrolled to the top
# When I close all editors
# Given the file "test_two.py" is open
# When I add a breakpoint to line 12
# When I close all editors
# When I select the command "Python: Debug All Unit Tests"
# Then debugger starts
# Then stack frame for file "test_one.py" and line 22 is displayed
# Then take a screenshot
# When I select the command "Debug: Continue"
# Then stack frame for file "test_two.py" and line 12 is displayed
# @scenario2
# Scenario: Debug file by clicking a node with breakpoint
# Given the file "test_one.py" is open
# Given the file is scrolled to the top
# When I close all editors
# Given the file "test_one.py" is open
# When I add a breakpoint to line 22
# When I close all editors
# When I select test tree node number 2 and press debug
# Then debugger starts
# Then stack frame for file "test_one.py" and line 22 is displayed
# @scenario3
# Scenario: Debug suite with breakpoint
# Given the file "test_one.py" is open
# Given the file is scrolled to the top
# When I close all editors
# Given the file "test_one.py" is open
# When I add a breakpoint to line 22
# When I close all editors
# When I select test tree node number 3 and press debug
# Then debugger starts
# Then stack frame for file "test_one.py" and line 22 is displayed
# @scenario3
# Scenario: Debug function with breakpoint
# Given the file "test_one.py" is open
# Given the file is scrolled to the top
# When I close all editors
# Given the file "test_one.py" is open
# When I add a breakpoint to line 22
# When I close all editors
# When I select test tree node number 4 and press debug
# Then debugger starts
# Then stack frame for file "test_one.py" and line 22 is displayed
# @scenario4
# Scenario: Code Lenses appear
# Given the file "test_one.py" is open
# Given the file is scrolled to the top
# When I close all editors
# Given the file "test_one.py" is open
# Then code lens "Run Test" is visible in 5 seconds
# Then code lens "Debug Test" is visible
# @scenario5
# Scenario: Running test suite via Code Lenses will display progress indicator on tree
# Given the file "tests/test_running_delay" is updated with the value "5"
# Given the file "test_one.py" is open
# Given the file is scrolled to the top
# When I close all editors
# Given the file "test_one.py" is open
# Then code lens "Run Test" is visible in 5 seconds
# When I click first code lens "Run Test"
# When I select the command "View: Show Test"
# Then wait for 1 second
# Then there are at least 4 running test items
# Then the toolbar button with the text "Stop" is visible
# Then stop the tests
# @scenario6
# Scenario: Running test function via Code Lenses will display progress indicator on tree
# Given the file "tests/test_running_delay" is updated with the value "5"
# Given the file "test_one.py" is open
# Given the file is scrolled to the top
# When I close all editors
# Given the file "test_one.py" is open
# Then code lens "Run Test" is visible in 5 seconds
# When I click second code lens "Run Test"
# Then wait for 1 second
# Then there are 1 running test items
# Then the toolbar button with the text "Stop" is visible
# Then take a screenshot
# Then stop the tests
# @scenario7
# Scenario: Debugging test suite via Code Lenses
# Given the file "test_one.py" is open
# Given the file is scrolled to the top
# When I close all editors
# Given the file "test_one.py" is open
# When I add a breakpoint to line 22
# When I select the command "View: Show Test"
# When I click first code lens "Debug Test"
# Then wait for 1 second
# Then debugger starts
# Then stack frame for file "test_one.py" and line 22 is displayed
# @scenario8
# Scenario: Debugging test function via Code Lenses
# Given the file "test_one.py" is open
# Given the file is scrolled to the top
# When I close all editors
# Given the file "test_one.py" is open
# When I add a breakpoint to line 22
# Then code lens "Run Test" is visible in 5 seconds
# When I click second code lens "Debug Test"
# Then wait for 1 second
# Then debugger starts
# Then stack frame for file "test_one.py" and line 22 is displayed

Просмотреть файл

@ -0,0 +1,149 @@
# @testing @ci @toolbar
# @/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing
# Feature: Testing (toolbar)
# Background: Set up tests
# Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# Given the file "tests/test_discovery_delay" is updated with the value "0"
# Given the file "tests/test_running_delay" is updated with the value "0"
# Given the file "tests/data.json" is updated with the value "[1,2,3,4,5,6]"
# When I select the command "Python: Discover Unit Tests"
# Then wait for 1 second
# Then wait for the test icon to appear within 5 seconds
# When I select the command "View: Show Test"
# Then take a screenshot
# Then the toolbar button with the text "Run All Unit Tests" is visible
# Then the toolbar button with the text "Debug All Unit Tests" is visible
# Then the toolbar button with the text "Discover Unit Tests" is visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is not visible
# Then the toolbar button with the text "Stop" is not visible
# Then expand test explorer tree
# @scenario1
# Scenario: Icons with no failures
# When I select the command "Python: Run All Unit Tests"
# Then wait for 1 second
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 5 seconds
# Then the toolbar button with the text "Run All Unit Tests" is visible
# Then the toolbar button with the text "Debug All Unit Tests" is visible
# Then the toolbar button with the text "Discover Unit Tests" is visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is not visible
# Then the toolbar button with the text "Stop" is not visible
# @scenario2
# Scenario: Icons with failures and then no failures
# Given the file "tests/data.json" is updated with the value "[0,2,3,4,5,6]"
# When I select the command "Python: Run All Unit Tests"
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 5 seconds
# Then the toolbar button with the text "Run All Unit Tests" is visible
# Then the toolbar button with the text "Debug All Unit Tests" is visible
# Then the toolbar button with the text "Discover Unit Tests" is visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is visible
# Then the toolbar button with the text "Stop" is not visible
# Given the file "tests/data.json" is updated with the value "[1,2,3,4,5,6]"
# When I select the command "Python: Run All Unit Tests"
# Then wait for 1 second
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 5 seconds
# Then the toolbar button with the text "Run All Unit Tests" is visible
# Then the toolbar button with the text "Debug All Unit Tests" is visible
# Then the toolbar button with the text "Discover Unit Tests" is visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is not visible
# Then the toolbar button with the text "Stop" is not visible
# @scenario3
# Scenario: Icons while discovering
# When I update file "tests/test_discovery_delay" with value "3"
# When I select the command "Python: Discover Unit Tests"
# Then wait for 1 second
# Then the toolbar button with the text "Run All Unit Tests" is not visible
# Then the toolbar button with the text "Debug All Unit Tests" is not visible
# # The `Discover Unit Tests` is still visible with a progress icon.
# # Probably, we should change the tooltip at this point to `Discovering Tests`
# Then the toolbar button with the text "Discover Unit Tests" is visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is not visible
# Then the toolbar button with the text "Stop" is visible
# Then take a screenshot
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 10 seconds
# Then the toolbar button with the text "Stop" is not visible
# @scenario4
# Scenario: Icons while running
# When I update file "tests/test_running_delay" with value "3"
# When I select the command "Python: Run All Unit Tests"
# Then wait for 1 second
# Then the toolbar button with the text "Run All Unit Tests" is not visible
# Then the toolbar button with the text "Debug All Unit Tests" is not visible
# Then the toolbar button with the text "Discover Unit Tests" is not visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is not visible
# Then the toolbar button with the text "Stop" is visible
# Then take a screenshot
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 10 seconds
# Then the toolbar button with the text "Stop" is not visible
# @scenario5
# Scenario: Stop discovering slow tests
# When I update file "tests/test_discovery_delay" with value "10"
# When I select the command "Python: Discover Unit Tests"
# Then wait for 1 second
# Then the toolbar button with the text "Run All Unit Tests" is not visible
# Then the toolbar button with the text "Debug All Unit Tests" is not visible
# # The `Discover Unit Tests` is still visible with a progress icon.
# # Probably, we should change the tooltip at this point to `Discovering Tests`
# Then the toolbar button with the text "Discover Unit Tests" is visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is not visible
# Then the toolbar button with the text "Stop" is visible
# Then take a screenshot
# When I stop the tests
# Then wait for 2 second
# Then the toolbar button with the text "Run All Unit Tests" is visible
# Then the toolbar button with the text "Debug All Unit Tests" is visible
# Then the toolbar button with the text "Discover Unit Tests" is visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is not visible
# Then the toolbar button with the text "Stop" is not visible
# Then take a screenshot
# @scenario6
# Scenario: Stop slow running tests
# Given the file "tests/test_running_delay" is updated with the value "10"
# Given the file "tests/data.json" is updated with the value "[1,2,1,4,5,6]"
# When I select the command "Python: Run All Unit Tests"
# Then wait for 1 second
# Then the toolbar button with the text "Run All Unit Tests" is not visible
# Then the toolbar button with the text "Debug All Unit Tests" is not visible
# Then the toolbar button with the text "Discover Unit Tests" is not visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is not visible
# Then the toolbar button with the text "Stop" is visible
# Then take a screenshot
# When I stop the tests
# Then wait for 2 second
# Then the toolbar button with the text "Run All Unit Tests" is visible
# Then the toolbar button with the text "Debug All Unit Tests" is visible
# Then the toolbar button with the text "Discover Unit Tests" is visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is not visible
# Then the toolbar button with the text "Stop" is not visible
# Then take a screenshot
# @scenario7
# Scenario: Failed and success icons
# Given the file "tests/data.json" is updated with the value "[1,2,1,1,1,6]"
# When I select the command "Python: Run All Unit Tests"
# Then wait for 1 second
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 5 seconds
# Then there are at least 4 error test items
# Then there are 5 success test items
# Then take a screenshot
# Given the file "tests/data.json" is updated with the value "[1,2,3,4,5,6]"
# When I select the command "Python: Run All Unit Tests"
# Then wait for 1 second
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 5 seconds
# Then there are 9 success test items
# Then take a screenshot

Просмотреть файл

@ -0,0 +1,451 @@
# @testing @ci @tree
# @/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing
# Feature: Testing (tree view)
# Background: Set up tests
# Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# Given the file "tests/test_discovery_delay" is updated with the value "0"
# Given the file "tests/test_running_delay" is updated with the value "0"
# Given the file "tests/data.json" is updated with the value "[1,2,3,4,5,6]"
# When I select the command "Python: Discover Unit Tests"
# Then wait for 1 second
# Then wait for the test icon to appear within 5 seconds
# When I select the command "View: Show Test"
# Then take a screenshot
# Then the toolbar button with the text "Run All Unit Tests" is visible
# Then the toolbar button with the text "Debug All Unit Tests" is visible
# Then the toolbar button with the text "Discover Unit Tests" is visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is not visible
# Then the toolbar button with the text "Stop" is not visible
# Then expand test explorer tree
# @scenario1
# Scenario: Successful tree nodes
# When I select the command "Python: Run All Unit Tests"
# Then wait for 1 second
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 5 seconds
# Then there are 9 success test items
# @scenario2
# Scenario: Running single item
# When I select the command "Python: Run All Unit Tests"
# Then wait for 1 second
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 5 seconds
# Given the file "tests/test_running_delay" is updated with the value "2"
# When I select test tree node number 6 and press run
# Then there are at least 1 running test items
# Then there are 8 success test items
# Then take a screenshot
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 5 seconds
# Then there are 9 success test items
# Then take a screenshot
# @scenario3
# Scenario: Running failed tests, run again and then fix and run again
# Given the file "tests/data.json" is updated with the value "[0,0,0,4,5,6]"
# When I select the command "Python: Run All Unit Tests"
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 5 seconds
# Then there are at least 6 error test items
# Then there are 3 success test items
# Then the toolbar button with the text "Run All Unit Tests" is visible
# Then the toolbar button with the text "Run Failed Unit Tests" is visible
# Then the toolbar button with the text "Debug All Unit Tests" is visible
# Then the toolbar button with the text "Discover Unit Tests" is visible
# Then the toolbar button with the text "Show Unit Test Output" is visible
# Given the file "tests/test_running_delay" is updated with the value "2"
# When I select the command "Python: Run Failed Unit Tests"
# Then wait for 1 second
# Then there are at least 6 running test items
# Then there are 3 success test items
# Then take a screenshot
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 5 seconds
# Then there are at least 6 error test items
# Then there are 3 success test items
# Then take a screenshot
# Given the file "tests/test_running_delay" is updated with the value "0"
# Given the file "tests/data.json" is updated with the value "[1,2,3,4,5,6]"
# When I select the command "Python: Run Failed Unit Tests"
# Then wait for 1 second
# Then wait for the toolbar button with the text "Run All Unit Tests" to appear within 5 seconds
# Then there are 9 success test items
# Then take a screenshot
# @scenario4
# Scenario: Opens test file
# When I close all editors
# When I select test tree node number 2
# Then the file "test_one.py" will be opened
# Then take a screenshot
# When I close all editors
# When I select test tree node number 3
# Then the file "test_one.py" will be opened
# Then take a screenshot
# When I close all editors
# When I select test tree node number 11
# Then the file "test_two.py" will be opened
# Then take a screenshot
# When I close all editors
# When I select test tree node number 12
# Then the file "test_two.py" will be opened
# Then take a screenshot
# @scenario5
# Scenario: Opens test file and sets focus
# When I close all editors
# When I select test tree node number 3
# When I select test tree node number 3 and press open
# Then line 20 of file "test_one.py" will be highlighted
# Then take a screenshot
# When I close all editors
# When I select test tree node number 12
# When I select test tree node number 12 and press open
# Then the file "test_two.py" will be opened
# Then line 10 of file "test_two.py" will be highlighted
# Then take a screenshot
# # @debug
# # Scenario: Debug all tests and add breakpoints to two files
# # Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# # Given the file "tests/test_discovery_delay" is updated with the value "0"
# # Given the file "tests/test_running_delay" is updated with the value "0"
# # Given the file "tests/data.json" is updated with the value "[1,2,3,4,5,6]"
# # Given the file ".vscode/launch.json" does not exist
# # Given the file "test_one.py" is open
# # Given the file is scrolled to the top
# # When I close all editors
# # Given the file "test_one.py" is open
# # Then wait for 1 second
# # When I add a breakpoint to line 22
# # Given the file "test_two.py" is open
# # Given the file is scrolled to the top
# # When I close all editors
# # Given the file "test_two.py" is open
# # Then wait for 1 second
# # When I add a breakpoint to line 12
# # When I close all editors
# # Then wait for 1 second
# # When I select the command "Python: Debug All Unit Tests"
# # Then debugger starts
# # Then stack frame for file "test_one.py" and line 22 is displayed
# # Then take a screenshot
# # When I select the command "Debug: Continue"
# # Then stack frame for file "test_two.py" and line 12 is displayed
# # Then take a screenshot
# # When I select the command "Debug: Continue"
# # Then wait for 1 second
# # # Continue again, as the debugger breaks into sys.exit.
# # When I select the command "Debug: Continue"
# # @debug
# # Scenario: Debug file with breakpoint
# # Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# # Given the file "tests/test_discovery_delay" is updated with the value "0"
# # Given the file "tests/test_running_delay" is updated with the value "0"
# # Given the file "tests/data.json" is updated with the value "[1,2,3,4,5,6]"
# # Given the file ".vscode/launch.json" does not exist
# # Given the file "test_one.py" is open
# # Given the file is scrolled to the top
# # When I close all editors
# # Given the file "test_one.py" is open
# # Then wait for 1 second
# # When I add a breakpoint to line 22
# # When I close all editors
# # When I select the command "Python: Discover Unit Tests"
# # Then wait for 5 second
# # When I select the command "View: Show Test"
# # Then select first node
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # When I select test tree node number 2
# # When I select test tree node number 2 and press debug
# # Then debugger starts
# # Then stack frame for file "test_one.py" and line 22 is displayed
# # Then take a screenshot
# # When I select the command "Debug: Continue"
# # Then wait for 1 second
# # # Continue again, as the debugger breaks into sys.exit.
# # When I select the command "Debug: Continue"
# # @debug
# # Scenario: Debug suite with breakpoint
# # Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# # Given the file "tests/test_discovery_delay" is updated with the value "0"
# # Given the file "tests/test_running_delay" is updated with the value "0"
# # Given the file "tests/data.json" is updated with the value "[1,2,3,4,5,6]"
# # Given the file ".vscode/launch.json" does not exist
# # Given the file "test_one.py" is open
# # Given the file is scrolled to the top
# # When I close all editors
# # Given the file "test_one.py" is open
# # Then wait for 1 second
# # When I add a breakpoint to line 22
# # When I close all editors
# # When I select the command "Python: Discover Unit Tests"
# # Then wait for 5 second
# # When I select the command "View: Show Test"
# # Then select first node
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # When I select test tree node number 3
# # When I select test tree node number 3 and press debug
# # Then debugger starts
# # Then stack frame for file "test_one.py" and line 22 is displayed
# # Then take a screenshot
# # When I select the command "Debug: Continue"
# # Then wait for 1 second
# # # Continue again, as the debugger breaks into sys.exit.
# # When I select the command "Debug: Continue"
# # @debug
# # Scenario: Debug function with breakpoint
# # Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# # Given the file "tests/test_discovery_delay" is updated with the value "0"
# # Given the file "tests/test_running_delay" is updated with the value "0"
# # Given the file "tests/data.json" is updated with the value "[1,2,3,4,5,6]"
# # Given the file ".vscode/launch.json" does not exist
# # Given the file "test_one.py" is open
# # Given the file is scrolled to the top
# # When I close all editors
# # Given the file "test_one.py" is open
# # Then wait for 1 second
# # When I add a breakpoint to line 22
# # When I close all editors
# # When I select the command "Python: Discover Unit Tests"
# # Then wait for 5 second
# # When I select the command "View: Show Test"
# # Then select first node
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # When I select test tree node number 4
# # When I select test tree node number 4 and press debug
# # Then debugger starts
# # Then stack frame for file "test_one.py" and line 22 is displayed
# # Then take a screenshot
# # When I select the command "Debug: Continue"
# # Then wait for 1 second
# # # Continue again, as the debugger breaks into sys.exit.
# # When I select the command "Debug: Continue"
# # @codelens
# # Scenario: Code Lenses appear
# # Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# # Given the file "tests/test_discovery_delay" is updated with the value "0"
# # Given the file "tests/test_running_delay" is updated with the value "0"
# # When I select the command "Python: Discover Unit Tests"
# # Then wait for 5 second
# # Given the file "test_one.py" is open
# # Given the file is scrolled to the top
# # Then wait for 5 second
# # Then code lens "Run Test" is visible
# # Then code lens "Debug Test" is visible
# # @codelens
# # Scenario: Running test suite via Code Lenses will display progress indicator on tree
# # Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# # Given the file "tests/test_discovery_delay" is updated with the value "0"
# # Given the file "tests/test_running_delay" is updated with the value "2"
# # When I select the command "Python: Discover Unit Tests"
# # Then wait for 5 second
# # When I select the command "View: Show Test"
# # Then select first node
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # Given the file "test_one.py" is open
# # Given the file is scrolled to the top
# # Then wait for 5 second
# # When I click first code lens "Run Test"
# # Then wait for 1 second
# # Then there are at least 4 running test items
# # Then the toolbar button with the text "Stop" is visible
# # Then wait for 10 second
# # Then the toolbar button with the text "Stop" is not visible
# # @codelens
# # Scenario: Running test function via Code Lenses will display progress indicator on tree
# # Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# # Given the file "tests/test_discovery_delay" is updated with the value "0"
# # Given the file "tests/test_running_delay" is updated with the value "2"
# # When I select the command "Python: Discover Unit Tests"
# # Then wait for 5 second
# # When I select the command "View: Show Test"
# # Then select first node
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # Given the file "test_one.py" is open
# # Given the file is scrolled to the top
# # Then wait for 5 second
# # When I click second code lens "Run Test"
# # Then wait for 1 second
# # Then there are 1 running test items
# # Then the toolbar button with the text "Stop" is visible
# # Then wait for 10 second
# # Then the toolbar button with the text "Stop" is not visible
# # @codelens @debug
# # Scenario: Debugging test suite via Code Lenses
# # Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# # Given the file "tests/test_discovery_delay" is updated with the value "0"
# # Given the file "tests/test_running_delay" is updated with the value "0"
# # Given the file "test_one.py" is open
# # Given the file is scrolled to the top
# # When I close all editors
# # Given the file "test_one.py" is open
# # Then wait for 1 second
# # When I add a breakpoint to line 22
# # When I add a breakpoint to line 27
# # When I add a breakpoint to line 32
# # When I close all editors
# # When I select the command "Python: Discover Unit Tests"
# # Then wait for 5 second
# # When I select the command "View: Show Test"
# # Then select first node
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # Given the file "test_one.py" is open
# # Given the file is scrolled to the top
# # Then wait for 5 second
# # When I click first code lens "Debug Test"
# # Then wait for 1 second
# # Then debugger starts
# # Then stack frame for file "test_one.py" and line 22 is displayed
# # Then take a screenshot
# # When I select the command "Debug: Continue"
# # Then stack frame for file "test_one.py" and line 32 is displayed
# # Then take a screenshot
# # When I select the command "Debug: Continue"
# # Then stack frame for file "test_one.py" and line 27 is displayed
# # Then take a screenshot
# # When I select the command "Debug: Continue"
# # Then wait for 1 second
# # # Continue again, as the debugger breaks into sys.exit.
# # When I select the command "Debug: Continue"
# # Scenario: Debugging test function via Code Lenses
# # Given the workspace is based on "/Users/donjayamanne/Desktop/Development/vscode/smokeTests/testing"
# # Given the file "tests/test_discovery_delay" is updated with the value "0"
# # Given the file "tests/test_running_delay" is updated with the value "0"
# # Given the file "test_one.py" is open
# # Given the file is scrolled to the top
# # When I close all editors
# # Given the file "test_one.py" is open
# # Then wait for 1 second
# # When I add a breakpoint to line 22
# # When I add a breakpoint to line 27
# # When I add a breakpoint to line 32
# # When I close all editors
# # When I select the command "Python: Discover Unit Tests"
# # Then wait for 5 second
# # When I select the command "View: Show Test"
# # Then select first node
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "down"
# # When I press "right"
# # When I press "down"
# # When I press "right"
# # Given the file "test_one.py" is open
# # Given the file is scrolled to the top
# # Then wait for 5 second
# # When I click second code lens "Debug Test"
# # Then wait for 1 second
# # Then debugger starts
# # Then stack frame for file "test_one.py" and line 22 is displayed
# # Then take a screenshot
# # # Continue again, as the debugger breaks into sys.exit.
# # When I select the command "Debug: Continue"
# # When I select the command "Debug: Continue"
# # Then debugger stops

Просмотреть файл

@ -0,0 +1,17 @@
# @terminal
# Feature: Terminal
# Background: Activated Extension
# Given the Python extension has been activated
# @smoke
# Scenario: Open a terminal
# Then take a screenshot
# Then wait for 1 second
# When I select the command "Python: Create Terminal"
# Then take a screenshot
# Then wait for 1 second
# Then take a screenshot
# Then wait for 5 seconds
# Then take a screenshot
# Then wait for 5 seconds
# Then take a screenshot

8797
uitests/package-lock.json сгенерированный Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

68
uitests/package.json Normal file
Просмотреть файл

@ -0,0 +1,68 @@
{
"name": "uitests",
"version": "1.0.0",
"main": "./out/index",
"scripts": {
"compile": "tsc -watch -p ./",
"compileNoWatch": "tsc -p ./",
"download": "node ./out/index download",
"_install": "node ./out/index install",
"test": "node ./out/index test",
"lint": "tslint --project tsconfig.json"
},
"dependencies": {
"@types/chai": "^4.1.7",
"@types/chai-arrays": "^1.0.2",
"@types/chai-as-promised": "^7.1.0",
"@types/cucumber": "^4.0.7",
"@types/fs-extra": "^8.0.0",
"@types/gulp": "^4.0.6",
"@types/progress": "^2.0.3",
"@types/puppeteer-core": "^1.9.0",
"@types/request": "^2.48.2",
"@types/rimraf": "^2.0.2",
"@types/tmp": "^0.1.0",
"@types/yargs": "^13.0.0",
"chai": "^4.2.0",
"chai-array": "0.0.2",
"chai-as-promised": "^7.1.1",
"clipboardy": "^2.1.0",
"colors": "^1.3.3",
"cucumber": "^5.1.0",
"cucumber-html-reporter": "^5.0.0",
"cucumber-junit": "^1.7.1",
"cucumber-pretty": "^1.5.2",
"fs-extra": "^8.1.0",
"glob": "^7.1.4",
"gulp": "^4.0.2",
"gulp-chmod": "^3.0.0",
"gulp-filter": "^6.0.0",
"gulp-gunzip": "^1.1.0",
"gulp-untar": "0.0.8",
"gulp-vinyl-zip": "^2.1.2",
"jsonc-parser": "^2.1.0",
"multiple-cucumber-html-reporter": "^1.12.0",
"named-js-regexp": "^1.3.5",
"npm": "^6.10.3",
"prettier": "^1.18.2",
"progress": "^2.0.3",
"puppeteer-core": "^1.19.0",
"request": "^2.88.0",
"request-progress": "^3.0.0",
"rimraf": "^2.6.3",
"source-map-support": "^0.5.12",
"tmp": "^0.1.0",
"tslint": "^5.20.0",
"tslint-config-prettier": "^1.18.0",
"tslint-eslint-rules": "^5.4.0",
"tslint-microsoft-contrib": "^6.2.0",
"tslint-plugin-prettier": "^2.0.1",
"typescript": "^3.6.3",
"vinyl-fs": "^3.0.3",
"vsce": "^1.66.0",
"vscode-uri": "^2.0.3",
"winston": "^3.2.1",
"winston-transport": "^4.3.0",
"yargs": "^13.3.0"
}
}

41
uitests/src/constants.ts Normal file
Просмотреть файл

@ -0,0 +1,41 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import * as path from 'path';
import { RetryOptions } from './helpers';
export const isCI = process.env.TRAVIS === 'true' || process.env.TF_BUILD !== undefined;
export const uitestsRootPath = path.join(__dirname, '..');
export const featurePath = path.join(uitestsRootPath, 'features');
export const vscodeTestPath = path.join(uitestsRootPath, '.vscode test');
// Assume 1 minute is enough for extension to get activated.
// Remember, activation of extension is slow on Windows.
export const extensionActivationTimeout = 60_000;
export const maxStepTimeout = 150_000;
export const maxHookTimeout = 240_000;
// Tooltip of the Statusbar created the Bootstrap extension to indicate it has activated.
export const pyBootstrapTooltip = 'Py';
// Tooltip of the Statusbar created by the Bootstrap extension when Python Extension has activated.
export const pyBootstrapActivatedStatusBarTooltip = 'Py2';
export const RetryMax30Seconds: RetryOptions = { timeout: 30_000, interval: 100 };
export const RetryMax20Seconds: RetryOptions = { timeout: 20_000, interval: 100 };
export const RetryMax10Seconds: RetryOptions = { timeout: 10_000, interval: 100 };
export const RetryMax5Seconds: RetryOptions = { timeout: 5_000, interval: 100 };
export const RetryMax2Seconds: RetryOptions = { timeout: 2_000, interval: 100 };
export const RetryMax5Times: RetryOptions = { count: 5, interval: 100 };
export const RetryMax2Times: RetryOptions = { count: 2, interval: 100 };
export const CucumberRetryMax30Seconds: {} = { wrapperOptions: { retry: RetryMax30Seconds } };
export const CucumberRetryMax20Seconds: {} = { wrapperOptions: { retry: RetryMax20Seconds } };
export const CucumberRetryMax10Seconds: {} = { wrapperOptions: { retry: RetryMax10Seconds } };
export const CucumberRetryMax5Seconds: {} = { wrapperOptions: { retry: RetryMax5Seconds } };
export const CucumberRetryMax2Seconds: {} = { wrapperOptions: { retry: RetryMax2Seconds } };
export const CucumberRetryMax5Times: {} = { wrapperOptions: { retry: RetryMax5Times } };
export const CucumberRetryMax2Times: {} = { wrapperOptions: { retry: RetryMax2Times } };
export type localizationKeys = 'debug.selectConfigurationTitle';

Просмотреть файл

@ -0,0 +1,140 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
/**
* @typedef {Object} SplitLinesOptions
* @property {boolean} [trim=true] - Whether to trim the lines.
* @property {boolean} [removeEmptyEntries=true] - Whether to remove empty entries.
*/
// https://stackoverflow.com/questions/39877156/how-to-extend-string-prototype-and-use-it-next-in-typescript
// tslint:disable-next-line:interface-name
/**
* @typedef {Object} SplitLinesOptions
* @property {boolean} [trim=true] - Whether to trim the lines.
* @property {boolean} [removeEmptyEntries=true] - Whether to remove empty entries.
*/
// https://stackoverflow.com/questions/39877156/how-to-extend-string-prototype-and-use-it-next-in-typescript
// tslint:disable-next-line:interface-name
declare interface String {
    /**
     * Split a string using the cr and lf characters and return them as an array.
     * By default lines are trimmed and empty lines are removed.
     * @param {SplitLinesOptions=} splitOptions - Options used for splitting the string.
     */
    splitLines(splitOptions?: { trim: boolean; removeEmptyEntries?: boolean }): string[];
    /**
     * Appropriately formats a string so it can be used as an argument for a command in a shell.
     * E.g. if an argument contains a space, then it will be enclosed within double quotes.
     */
    toCommandArgument(): string;
    /**
     * Appropriately formats a file path so it can be used as an argument for a command in a shell.
     * E.g. if an argument contains a space, then it will be enclosed within double quotes.
     * Backslashes are also converted to forward slashes.
     */
    fileToCommandArgument(): string;
    /**
     * String.format() implementation.
     * Tokens such as {0}, {1} will be replaced with corresponding positional arguments.
     */
    format(...args: string[]): string;
    /**
     * String.trimQuotes implementation
     * Removes leading and trailing quotes (single or double) from a string.
     */
    trimQuotes(): string;
    /**
     * Replaces characters such as 160 with 32.
     * When we get string content of html elements, we get char code 160 instead of 32.
     * NOTE(review): this redeclares the built-in ES `String.prototype.normalize` signature.
     */
    normalize(): string;
}
// Keep a reference to the built-in (standard Unicode) normalize so it can still be applied.
const oldNormalize = String.prototype.normalize;
/**
 * Replaces characters such as 160 with 32.
 * When we get string content of html elements, we get char code 160 instead of 32.
 * NOTE(review): this overrides the built-in `String.prototype.normalize` (Unicode
 * normalization); the original implementation is still invoked afterwards when present.
 */
// tslint:disable-next-line: no-any
String.prototype.normalize = function(this: string, ...args: []): string {
    // Replace every non-breaking space (char 160) with a regular space (char 32).
    const normalized = this.replace(new RegExp(String.fromCharCode(160), 'g'), String.fromCharCode(32));
    return typeof oldNormalize === 'function' ? oldNormalize.apply(normalized, args) : normalized;
};
/**
 * Split a string on cr/lf boundaries and return the pieces as an array.
 * Lines are trimmed and empty entries dropped unless the options say otherwise.
 * @param {SplitLinesOptions=} splitOptions - Options used for splitting the string.
 */
String.prototype.splitLines = function(this: string, splitOptions: { trim: boolean; removeEmptyEntries: boolean } = { removeEmptyEntries: true, trim: true }): string[] {
    const rawLines = this.split(/\r?\n/g);
    const maybeTrimmed = splitOptions && splitOptions.trim ? rawLines.map(item => item.trim()) : rawLines;
    return splitOptions && splitOptions.removeEmptyEntries ? maybeTrimmed.filter(item => item.length > 0) : maybeTrimmed;
};
/**
 * Appropriately formats a string so it can be used as an argument for a command in a shell.
 * E.g. if an argument contains a space, then it will be enclosed within double quotes.
 * Already-quoted values (leading and trailing `"`) are returned unchanged.
 */
String.prototype.toCommandArgument = function(this: string): string {
    if (!this) {
        return this;
    }
    const needsQuoting = this.indexOf(' ') >= 0 && !this.startsWith('"') && !this.endsWith('"');
    return needsQuoting ? `"${this}"` : this.toString();
};
/**
 * Appropriately formats a file path so it can be used as an argument for a command in a shell.
 * Quotes the value when it contains spaces and converts backslashes to forward slashes.
 */
String.prototype.fileToCommandArgument = function(this: string): string {
    if (!this) {
        return this;
    }
    const quoted = this.toCommandArgument();
    return quoted.replace(/\\/g, '/');
};
/**
 * String.trimQuotes implementation.
 * Removes a single leading and a single trailing quote (single or double) from a string.
 */
String.prototype.trimQuotes = function(this: string): string {
    if (!this) {
        return this;
    }
    // Strip one quote from each end; interior quotes are untouched.
    return this.replace(/^['"]/, '').replace(/['"]$/, '');
};
// tslint:disable-next-line:interface-name
declare interface Promise<T> {
    /**
     * Catches task errors and ignores them, preventing unhandled-rejection warnings.
     */
    ignoreErrors(): void;
}
/**
 * Swallows any rejection from this promise so it does not surface as an
 * unhandled rejection. Use for fire-and-forget calls whose failures are irrelevant.
 */
Promise.prototype.ignoreErrors = function<T>(this: Promise<T>) {
    // tslint:disable-next-line:no-empty
    this.catch(() => {});
};
// Only install format() when not already defined (e.g. by another polyfill).
if (!String.prototype.format) {
    /**
     * Replace positional tokens such as {0}, {1} with the matching argument.
     * Tokens with no matching argument are left untouched.
     */
    String.prototype.format = function(this: string) {
        const values = arguments;
        return this.replace(/{(\d+)}/g, (token, index) => (values[index] === undefined ? token : values[index]));
    };
}

Просмотреть файл

@ -0,0 +1,33 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: import-name no-console no-var-requires no-require-imports
import * as fs from 'fs-extra';
import ProgressBar from 'progress';
import * as request from 'request';
import { debug } from './logger';
// `request-progress` wraps a request stream and emits 'progress' events.
const progress = require('request-progress');
// Re-required via CommonJS so the class can be `new`ed, while keeping the typings.
const progressBar = require('progress') as typeof ProgressBar;
/**
 * Download a file over HTTP(S) to a local path, rendering a console progress bar.
 * @param url Source URL to download.
 * @param targetFile Path the downloaded content is written to.
 * @param downloadMessage Label shown to the left of the progress bar.
 * @returns Promise resolving once the download stream ends (rejects on request errors).
 */
export async function downloadFile(url: string, targetFile: string, downloadMessage = 'Downloading') {
    debug(`Downloading ${url} as ${targetFile}`);
    return new Promise<void>((resolve, reject) => {
        const bar = new progressBar(`${downloadMessage} [:bar]`, {
            complete: '=',
            incomplete: ' ',
            width: 20,
            total: 100
        });
        progress(request(url))
            .on('progress', (state: { percent: number }) => bar.update(state.percent))
            .on('error', reject)
            .on('end', () => {
                bar.update(100);
                // NOTE(review): resolves on the source stream's 'end', not the write
                // stream's 'finish' — the file may still be flushing; confirm acceptable.
                resolve();
            })
            .pipe(fs.createWriteStream(targetFile));
    });
}

Просмотреть файл

@ -0,0 +1,9 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// Barrel file: re-export all helper modules from a single entry point.
export * from './http';
export * from './misc';
export * from './unzip';
export * from './types';

Просмотреть файл

@ -0,0 +1,196 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-any
import * as util from 'util';
import { createLogger, format, transports } from 'winston';
import * as Transport from 'winston-transport';
import { getOSType, OSType, StopWatch } from './misc';
// Shared line formatter: "<Level> <timestamp>: <message>".
const formatter = format.printf(({ level, message, timestamp }) => {
    // Pascal casing of log level, so log files get highlighted when viewing in VSC and other editors.
    return `${level.substring(0, 1).toUpperCase()}${level.substring(1)} ${timestamp}: ${message}`;
});
// Colorized output used for the console transport.
const consoleFormat = format.combine(
    format.colorize({ all: true }),
    format.timestamp({
        format: 'YYYY-MM-DD HH:mm:ss'
    }),
    formatter
);
// Plain (uncolorized) output used for file transports.
const fileFormat = format.combine(
    format.timestamp({
        format: 'YYYY-MM-DD HH:mm:ss'
    }),
    formatter
);
// util.format semantics: first arg is the template, the rest are substitutions.
const getFormattedMessage = (...args: {}[]) => (args.length === 0 ? '' : util.format(args[0], ...args.slice(1)));
// Default logger (console only, debug level); replaced when `initialize` is called.
let logger = createLogger({
    format: consoleFormat,
    level: 'debug',
    transports: [new transports.Console({ format: consoleFormat })]
});
/**
 * Log a message at `info` level (printf-style substitutions supported).
 */
export function info(message: string, ...args: any[]) {
    const text = getFormattedMessage(message, ...args);
    logger.info(text);
}
/**
 * Log a message at `debug` level (printf-style substitutions supported).
 */
export function debug(message: string, ...args: any[]) {
    const text = getFormattedMessage(message, ...args);
    logger.debug(text);
}
/**
 * Log a message at `warn` level (printf-style substitutions supported).
 */
export function warn(message: string, ...args: any[]) {
    const text = getFormattedMessage(message, ...args);
    logger.warn(text);
}
/**
 * Log a message at `error` level (printf-style substitutions supported).
 */
export function error(message: string, ...args: any[]) {
    const text = getFormattedMessage(message, ...args);
    logger.error(text);
}
/**
 * (Re)create the module-level logger.
 * @param verbose When true, log at debug level; otherwise info.
 * @param filename Optional log file path (ignored on Windows — see comment below).
 */
export function initialize(verbose: boolean, filename?: string) {
    const level = verbose ? 'debug' : 'info';
    const loggerTransports: Transport[] = [new transports.Console({ format: consoleFormat })];
    if (filename && getOSType() !== OSType.Windows) {
        // Don't log to a file on windows, cuz it sucks.
        // We delete the file mid testing, but the file logger on windows craps out when the file is deleted.
        loggerTransports.push(new transports.File({ format: fileFormat, filename: filename }));
    }
    // NOTE(review): unlike the default logger above, no top-level `format` is set here —
    // per-transport formats apply; confirm this is intentional.
    logger = createLogger({ level, transports: loggerTransports });
}
/**
 * What do we want to log.
 * Bit-flags: values may be combined, e.g. `LogOptions.Arguments | LogOptions.ReturnValue`.
 * @export
 * @enum {number}
 */
export enum LogOptions {
    // Log neither arguments nor return values.
    None = 0,
    // Log the wrapped method's arguments.
    Arguments = 1,
    // Log the wrapped method's return value.
    ReturnValue = 2
}
/**
 * Render a call's argument list as a single human-readable string,
 * e.g. `Arg 1: "x", Arg 2: null`. Serialization failures never throw.
 */
// tslint:disable-next-line:no-any
function argsToLogString(args: any[]): string {
    try {
        const parts = (args || []).map((value, i) => {
            const prefix = `Arg ${i + 1}:`;
            if (value === undefined) {
                return `${prefix} undefined`;
            }
            if (value === null) {
                return `${prefix} null`;
            }
            try {
                // Uri-like objects are summarized by their fsPath for readability.
                return value && value.fsPath ? `${prefix} <Uri:${value.fsPath}>` : `${prefix} ${JSON.stringify(value)}`;
            } catch {
                return `${prefix} <argument cannot be serialized for logging>`;
            }
        });
        return parts.join(', ');
    } catch {
        return '';
    }
}
/**
 * Render a return value as `Return Value: <json>`.
 * Unserializable values produce a placeholder instead of throwing.
 */
// tslint:disable-next-line:no-any
function returnValueToLogString(returnValue: any): string {
    const prefix = 'Return Value: ';
    if (returnValue === undefined) {
        return `${prefix}undefined`;
    }
    if (returnValue === null) {
        return `${prefix}null`;
    }
    let serialized: string;
    try {
        serialized = JSON.stringify(returnValue);
    } catch {
        serialized = '<Return value cannot be serialized for logging>';
    }
    return `${prefix}${serialized}`;
}
// Log level used by the trace decorators below.
enum LogLevel {
    Information = 'Information',
    Error = 'Error',
    Warning = 'Warning'
}
/**
 * Method decorator: logs invocations (arguments and return value by default) at debug level.
 */
export function debugDecorator(message: string, options: LogOptions = LogOptions.Arguments | LogOptions.ReturnValue) {
    return trace(message, options);
}
/**
 * Method decorator: logs invocations (arguments and return value) at error level.
 */
export function errorDecorator(message: string) {
    return trace(message, LogOptions.Arguments | LogOptions.ReturnValue, LogLevel.Error);
}
/**
 * Method decorator: logs invocations only (no arguments or return value).
 */
export function infoDecorator(message: string) {
    return trace(message);
}
/**
 * Method decorator: logs invocations (arguments and return value) at warning level.
 */
export function warnDecorator(message: string) {
    return trace(message, LogOptions.Arguments | LogOptions.ReturnValue, LogLevel.Warning);
}
function trace(message: string, options: LogOptions = LogOptions.None, logLevel?: LogLevel) {
// tslint:disable-next-line:no-function-expression no-any
return function(_: Object, __: string, descriptor: TypedPropertyDescriptor<any>) {
const originalMethod = descriptor.value;
// tslint:disable-next-line:no-function-expression no-any
descriptor.value = function(...args: any[]) {
const className = _ && _.constructor ? _.constructor.name : '';
// tslint:disable-next-line:no-any
function writeSuccess(elapsedTime: number, returnValue: any) {
if (logLevel === LogLevel.Error) {
return;
}
writeToLog(elapsedTime, returnValue);
}
function writeError(elapsedTime: number, ex: Error) {
writeToLog(elapsedTime, undefined, ex);
}
// tslint:disable-next-line:no-any
function writeToLog(elapsedTime: number, returnValue?: any, ex?: Error) {
const messagesToLog = [message];
messagesToLog.push(`Class name = ${className}, completed in ${elapsedTime}ms`);
if ((options && LogOptions.Arguments) === LogOptions.Arguments) {
messagesToLog.push(argsToLogString(args));
}
if ((options & LogOptions.ReturnValue) === LogOptions.ReturnValue) {
messagesToLog.push(returnValueToLogString(returnValue));
}
if (ex) {
error(messagesToLog.join(', '), ex);
} else {
info(messagesToLog.join(', '));
}
}
const timer = new StopWatch();
try {
trace(`Before ${message}`, options, logLevel);
// tslint:disable-next-line:no-invalid-this no-unsafe-any
const result = originalMethod.apply(this, args);
// If method being wrapped returns a promise then wait for it.
// tslint:disable-next-line:no-unsafe-any
if (result && typeof result.then === 'function' && typeof result.catch === 'function') {
// tslint:disable-next-line:prefer-type-cast
(result as Promise<void>)
.then(data => {
writeSuccess(timer.elapsedTime, data);
return data;
})
.catch(ex => {
writeError(timer.elapsedTime, ex);
});
} else {
writeSuccess(timer.elapsedTime, result);
}
return result;
} catch (ex) {
writeError(timer.elapsedTime, ex);
throw ex;
}
};
return descriptor;
};
}

118
uitests/src/helpers/misc.ts Normal file
Просмотреть файл

@ -0,0 +1,118 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import { warn } from './logger';
import { RetryCounterOptions, RetryOptions, RetryTimeoutOptions } from './types';
/**
 * Operating systems the UI tests distinguish between.
 */
export enum OSType {
    OSX = 'OSX',
    Linux = 'Linux',
    Windows = 'Windows'
}
/**
 * Detect the current operating system from `process.platform`.
 * @throws Error when the platform is not win32/darwin/linux.
 */
export function getOSType(): OSType {
    const platform = process.platform;
    if (platform.startsWith('win')) {
        return OSType.Windows;
    }
    if (platform.startsWith('darwin')) {
        return OSType.OSX;
    }
    if (platform.startsWith('linux')) {
        return OSType.Linux;
    }
    throw new Error('Unknown OS');
}
/**
 * Resolve after the given number of milliseconds.
 */
export function sleep(timeout: number): Promise<void> {
    return new Promise(resolve => {
        setTimeout(resolve, timeout);
    });
}
/**
 * A shared no-op placeholder function.
 */
export function noop() {
    // Intentionally empty.
}
/**
 * Measures wall-clock time (ms) elapsed since construction or the last reset.
 */
export class StopWatch {
    // Epoch milliseconds at which the watch was (re)started.
    private started = Date.now();
    public get elapsedTime() {
        return Date.now() - this.started;
    }
    public reset() {
        this.started = Date.now();
    }
    public log(message: string): void {
        // tslint:disable-next-line: no-console
        console.log(`${this.elapsedTime}: ${message}`);
    }
}
// Any async function; used to type the retry helpers generically.
// tslint:disable-next-line: no-any
type AnyAsyncFunction = (...args: any[]) => Promise<any>;
// Unwrap the resolved type of a promise (T for Promise<T>, else T itself).
type Unpacked<T> = T extends Promise<infer U> ? U : T;
// tslint:disable-next-line: no-any
/**
 * Wrap a function to ensure it gets retried if there are any errors.
 * @example The following example will run the inner function for a max of 10ms (will fail after 10ms as it will always throw an exception).
 * retryWrapper(async ()=> { console.log('Hello'); throw new Error('kaboom');}, {timeout: 10});
 *
 * @export
 * @template T
 * @param {({} | any)} this
 * @param {RetryOptions} options
 * @param {T} fn
 * @param {...{}[]} args
 * @returns {Promise<Unpacked<ReturnType<T>>>}
 */
/**
 * Invoke `fn` repeatedly until it succeeds, retrying every `interval` ms.
 * Retries are bounded either by elapsed time (`timeout`) or by attempt count (`count`).
 * The last error is re-thrown when all attempts fail (and logged unless `logFailures` is false).
 */
export async function retryWrapper<T extends AnyAsyncFunction>(
    // tslint:disable-next-line: no-any
    this: {} | any,
    options: RetryOptions,
    fn: T,
    ...args: {}[]
): Promise<Unpacked<ReturnType<T>>> {
    const watch = new StopWatch();
    const interval = options.interval || 100;
    // Bug fix: `timeout / interval` can be fractional (e.g. timeout=250, interval=100),
    // and spreading `new Array(2.5)` throws a RangeError. Round up and always try at least once.
    const rawIterations = (options as RetryTimeoutOptions).timeout ? (options as RetryTimeoutOptions).timeout / interval : (options as RetryCounterOptions).count;
    const iterations = Math.max(1, Math.ceil(rawIterations));
    const timeout = (options as RetryTimeoutOptions).timeout || (options as RetryCounterOptions).count * interval;
    let lastEx: Error | undefined;
    for (let attempt = 0; attempt < iterations; attempt += 1) {
        try {
            return await (fn as Function).apply(this, args);
        } catch (ex) {
            lastEx = ex;
            // Stop retrying once the overall time budget is exhausted.
            if (watch.elapsedTime > timeout) {
                break;
            }
            await sleep(interval);
        }
    }
    if (options.logFailures !== false) {
        const customMessage = options.errorMessage ? `, ${options.errorMessage}` : '';
        warn(`Timeout after ${timeout}${customMessage}. Options ${JSON.stringify(options)}`, lastEx);
    }
    throw lastEx;
}
/**
 * Retry decorator.
 * Wraps the decorated async method with `retryWrapper` using the supplied options.
 *
 * @export
 * @param {RetryOptions} [options={ timeout: 5_000, interval: 100 }]
 * @returns
 */
export function retry(options: RetryOptions = { timeout: 5_000, interval: 100 }) {
    // tslint:disable-next-line: no-any no-function-expression
    return function(_target: any, _propertyKey: string, descriptor: PropertyDescriptor) {
        const wrapped = descriptor.value! as AnyAsyncFunction;
        descriptor.value = async function(this: {}, ...args: {}[]): Promise<{}> {
            // Preserve `this` for the wrapped method via bind.
            return retryWrapper.bind(this)(options, wrapped, ...args);
        };
        return descriptor;
    };
}

Просмотреть файл

@ -0,0 +1,52 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import { exec } from 'child_process';
import { debug } from './logger';
import { sleep } from './misc';
/**
 * Check whether `moduleName` can be imported by the given python interpreter.
 * Spawns `python -c "import <module>;print('Hello World')"` and inspects stdout.
 * Never rejects — any failure resolves to `false`.
 * NOTE(review): any stderr output counts as "not installed", so packages emitting
 * import-time warnings may yield false negatives — confirm acceptable.
 */
export async function isPackageInstalled(pythonPath: string, moduleName: string): Promise<boolean> {
    const cmd = `${pythonPath.toCommandArgument()} -c "import ${moduleName};print('Hello World')"`;
    debug(`Executing command = ${cmd}`);
    return new Promise<boolean>(resolve => {
        exec(cmd, (ex, stdout: string, stdErr: string) => {
            if (ex || stdErr) {
                debug(`Executing command = ${cmd}, error: `, ex, stdErr);
                return resolve(false);
            }
            debug(`Executing command = ${cmd}, output: `, stdout);
            // The sentinel only prints when the import succeeded.
            resolve(stdout.trim() === 'Hello World');
        });
    });
}
/**
 * Install a python package with pip (best effort — errors are swallowed downstream).
 */
export async function installPackage(pythonPath: string, moduleName: string): Promise<void> {
    await installOrUninstallPackage(pythonPath, moduleName, true);
}
/**
 * Uninstall a python package with pip (best effort — errors are swallowed downstream).
 */
export async function uninstallModule(pythonPath: string, moduleName: string): Promise<void> {
    await installOrUninstallPackage(pythonPath, moduleName, false);
}
/**
 * Run `pip install`/`pip uninstall` for the given module with the given interpreter.
 * Always resolves — pip failures are deliberately ignored (best effort).
 */
export async function installOrUninstallPackage(pythonPath: string, moduleName: string, install: boolean = true): Promise<void> {
    const subCommand = install ? 'install' : 'uninstall';
    // `-y` auto-confirms removal when uninstalling.
    const confirmationArgs = install ? [] : ['-y'];
    const cmd = `${pythonPath.toCommandArgument()} -m pip ${subCommand} ${moduleName} -q --disable-pip-version-check ${confirmationArgs.join(' ')}`;
    // tslint:disable-next-line: no-unnecessary-callback-wrapper
    return new Promise<void>(resolve => exec(cmd.trim(), () => resolve()));
}
/**
 * Install `moduleName` if it is not already importable by the interpreter.
 * Sleeps briefly after installing to let the environment settle.
 */
export async function ensurePackageIsInstalled(pythonPath: string, moduleName: string): Promise<void> {
    const installed = await isPackageInstalled(pythonPath, moduleName);
    if (!installed) {
        await installPackage(pythonPath, moduleName);
        await sleep(1000);
    }
}
/**
 * Uninstall `moduleName` if it is currently importable by the interpreter.
 * Sleeps briefly after uninstalling to let the environment settle.
 */
export async function ensurePackageIsNotInstalled(pythonPath: string, moduleName: string): Promise<void> {
    const installed = await isPackageInstalled(pythonPath, moduleName);
    debug(`Module ${moduleName} is installed = ${installed}`);
    if (installed) {
        await uninstallModule(pythonPath, moduleName);
        await sleep(1000);
    }
}

Просмотреть файл

@ -0,0 +1,99 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import * as fs from 'fs-extra';
import * as path from 'path';
import { ITestOptions } from '../types';
import { getOSType, OSType } from './misc';
// tslint:disable: no-var-requires no-require-imports no-any
// Report generators (loaded via `require` — no type definitions shipped).
const report = require('multiple-cucumber-html-reporter');
const cucumberJunit = require('cucumber-junit');
const reporter = require('cucumber-html-reporter');
// Human-friendly OS labels used in report metadata.
const OS = {
    [OSType.Linux]: '🐧 Linux',
    [OSType.OSX]: '🍎 Mac',
    [OSType.Windows]: '🖥 Win'
};
/**
 * Convert the cucumber JSON report into a JUnit XML report (`report.xml` in the reports dir).
 */
export async function generateJUnitReport(options: ITestOptions, cucumberReportJsonFilePath: string) {
    const content = await fs.readFile(cucumberReportJsonFilePath);
    const xml = cucumberJunit(content, { strict: true });
    await fs.writeFile(path.join(options.reportsPath, 'report.xml'), xml);
}
/**
 * Build the metadata entries (OS, VS Code channel, CI build, Python version)
 * attached to generated reports. Build/Python come from environment variables.
 */
function getMetadata(options: ITestOptions) {
    const entries = [
        { name: 'OS', value: OS[getOSType()] },
        { name: 'VS Code', value: options.channel },
        { name: 'Build', value: process.env.AgentJobName },
        { name: 'Python', value: process.env.PYTHON_VERSION }
    ];
    return entries;
}
/**
 * Add metadata into the JSON report.
 * Useful for later (when we merge all reports into one and generate html reports).
 *
 * @export
 * @param {ITestOptions} options
 * @param {string} cucumberReportJsonFilePath
 */
export async function addReportMetadata(options: ITestOptions, cucumberReportJsonFilePath: string) {
    const metadata = getMetadata(options);
    // Write custom metadata (make it part of Json report for later use).
    // This way cucumber report has the data.
    const reportData = JSON.parse(await fs.readFile(cucumberReportJsonFilePath, 'utf8'));
    for (const item of reportData) {
        // Deep-copy so each feature entry owns its own metadata object.
        item.metadata = JSON.parse(JSON.stringify(metadata));
    }
    await fs.writeFile(cucumberReportJsonFilePath, JSON.stringify(reportData));
}
/**
 * Generate HTML report.
 * (store metadata into cucumber json report for when multiple reports are merged into one).
 *
 * @export
 * @param {ITestOptions} options
 * @param {string} cucumberReportJsonFilePath
 */
export async function generateHtmlReport(options: ITestOptions, cucumberReportJsonFilePath: string) {
    // Generate the report.
    const htmlFile = path.join(options.reportsPath, 'index.html');
    const reportOptions = {
        name: 'Python VS Code',
        brandTitle: 'UI Tests',
        theme: 'hierarchy',
        jsonFile: cucumberReportJsonFilePath,
        output: htmlFile,
        reportSuiteAsScenarios: true,
        launchReport: false,
        metadata: {}
    };
    // Flatten the metadata entries into the name/value map the reporter expects.
    getMetadata(options).forEach(item => ((reportOptions.metadata as any)[item.name] = item.value));
    reporter.generate(reportOptions);
}
/**
 * Merge multiple cucumber reports into one.
 * (we expect metadata to be stored in cucumber json — see `addReportMetadata`).
 *
 * @export
 * @param {string} cucumberReportsPath
 * @param {string} outputDir
 */
export async function mergeAndgenerateHtmlReport(cucumberReportsPath: string, outputDir: string) {
    report.generate({
        jsonDir: cucumberReportsPath,
        reportPath: outputDir,
        pageTitle: 'Python VS Code',
        reportName: 'UI Tests',
        customMetadata: true,
        displayDuration: true
    });
}

Просмотреть файл

@ -0,0 +1,50 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
export type RetryTimeoutOptions = {
    /**
     * Maximum total time (ms) to keep retrying.
     * (Previous comment said "Number of times to try", which described `count`, not this.)
     *
     * @type {number}
     */
    timeout: number;
    /**
     * Time in ms to wait before retrying (generally defaults to 100ms).
     *
     * @type {number}
     */
    interval?: number;
    // Extra text appended to the warning logged when all retries fail.
    errorMessage?: string;
    /**
     * If false, then do not log failures.
     * Defaults to true (failures are logged) — see `retryWrapper`'s
     * `options.logFailures !== false` check.
     *
     * @type {boolean}
     */
    logFailures?: boolean;
};
export type RetryCounterOptions = {
    /**
     * Number of times to try.
     *
     * @type {number}
     */
    count: number;
    /**
     * Time in ms to wait before retrying (generally defaults to 100ms).
     *
     * @type {number}
     */
    interval?: number;
    // Extra text appended to the warning logged when all retries fail.
    errorMessage?: string;
    /**
     * If false, then do not log failures.
     * Defaults to true (failures are logged).
     *
     * @type {boolean}
     */
    logFailures?: boolean;
};
// Retrying is bounded either by elapsed time or by attempt count.
export type RetryOptions = RetryTimeoutOptions | RetryCounterOptions;

Просмотреть файл

@ -0,0 +1,61 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-var-requires no-require-imports no-default-export no-console
const gulp = require('gulp');
const vzip = require('gulp-vinyl-zip');
const vfs = require('vinyl-fs');
const untar = require('gulp-untar');
const gunzip = require('gulp-gunzip');
const chmod = require('gulp-chmod');
const filter = require('gulp-filter');
import * as fs from 'fs-extra';
import * as glob from 'glob';
import * as path from 'path';
import { debug } from './logger';
/**
 * Extract a downloaded VS Code archive into the target directory.
 * `.tar`/`.gz` archives go through the tar/gzip path; everything else is treated as a zip.
 */
export function unzipVSCode(zipFile: string, targetDir: string) {
    debug(`Unzip VSCode ${zipFile} into ${targetDir}`);
    // Bug fix: the tarball check previously looked for '.tag' (a typo) instead of '.tar',
    // so plain `.tar` archives were incorrectly routed to the zip extractor.
    const fn = zipFile.indexOf('.gz') > 0 || zipFile.indexOf('.tar') > 0 ? unzipTarGz : unzipFile;
    return fn(zipFile, targetDir);
}
/**
 * Extract a .zip archive into the target folder (created if missing)
 * using a gulp + gulp-vinyl-zip stream pipeline.
 */
export async function unzipFile(zipFile: string, targetFolder: string) {
    debug(`Unzip (unzipFile) ${zipFile} into ${targetFolder}`);
    await fs.ensureDir(targetFolder);
    return new Promise((resolve, reject) => {
        gulp.src(zipFile)
            .pipe(vzip.src())
            .pipe(vfs.dest(targetFolder))
            .on('end', resolve)
            .on('error', reject);
    });
}
/**
 * Extract a .tar.gz archive into the target folder (created if missing) and restore
 * the executable bit on the VS Code binaries and bundled ripgrep, which the
 * gulp untar pipeline does not preserve.
 */
export async function unzipTarGz(zipFile: string, targetFolder: string) {
    debug(`Unzip (unzipTarGz) ${zipFile} into ${targetFolder}`);
    // Globs (relative to the archive root) of files that must be executable.
    const fileToFixPermissions = ['VSCode-linux-x64/code', 'VSCode-linux-x64/code-insiders', 'VSCode-linux-x64/resources/app/node_modules*/vscode-ripgrep/**/rg'];
    await fs.ensureDir(targetFolder);
    await new Promise((resolve, reject) => {
        // Restrict the chmod step to just the binaries, then restore the full stream.
        const gulpFilter = filter(fileToFixPermissions, { restore: true });
        gulp.src(zipFile)
            .pipe(gunzip())
            .pipe(untar())
            .pipe(gulpFilter)
            .pipe(chmod(493)) // 0o755
            .pipe(gulpFilter.restore)
            .pipe(vfs.dest(targetFolder))
            .on('end', resolve)
            .on('error', reject);
    });
    // Belt-and-braces: chmod the extracted files on disk as well.
    for (const fileGlob of fileToFixPermissions) {
        const files = await new Promise<string[]>((resolve, reject) => {
            glob(path.join(targetFolder, fileGlob), (ex, items) => (ex ? reject(ex) : resolve(items)));
        });
        await Promise.all(files.map(file => fs.chmod(file, '755')));
    }
}

169
uitests/src/index.ts Normal file
Просмотреть файл

@ -0,0 +1,169 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import * as cp from 'child_process';
import * as path from 'path';
import * as yargs from 'yargs';
import { sleep } from './helpers';
import { info, initialize } from './helpers/logger';
import { mergeAndgenerateHtmlReport } from './helpers/report';
import { downloadVSCode, getTestOptions, installExtensions, TestOptions, waitForPythonExtensionToActivate } from './setup';
import { start } from './testRunner';
import { Channel } from './types';
import { Application } from './vscode';
// tslint:disable: no-console
// VS Code distribution channels supported by the tooling.
const channels: Channel[] = ['insider', 'stable'];
// Shared yargs option definitions reused across the CLI commands below.
const channelOption = {
    describe: 'VS Code Channel',
    default: 'stable' as Channel,
    choices: channels
};
const destinationOption = {
    describe: 'Destination for download path',
    default: './.vscode test'
};
const enableVerboseLogging = {
    describe: 'Enable verbose (debug) logging',
    default: false,
    boolean: true
};
// CLI entry point: defines the download/install/launch/test/report/steps commands.
// tslint:disable-next-line: no-unused-expression
const parsedArgs = yargs
    // Command: download the VS Code build for a channel.
    .command({
        command: 'download',
        describe: 'Downloads VS Code',
        builder: (args: yargs.Argv) =>
            args
                .option('channel', channelOption)
                .option('destination', destinationOption)
                .option('verbose', enableVerboseLogging),
        handler: async argv => {
            initialize(argv.verbose);
            downloadVSCode(argv.channel, path.resolve(argv.destination)).catch(console.error);
        }
    })
    // Command: install the Python extension (vsix) into the downloaded VS Code.
    .command({
        command: 'install',
        describe: 'Installs the extensions into VS Code',
        builder: (args: yargs.Argv) =>
            args
                .option('channel', channelOption)
                .option('destination', destinationOption)
                .option('verbose', enableVerboseLogging)
                .option('vsix', {
                    describe: 'Path to Python Extension',
                    default: './ms-python-insiders.vsix'
                }),
        handler: async argv => {
            initialize(argv.verbose);
            await installExtensions(argv.channel, path.resolve(argv.destination), path.resolve(argv.vsix));
        }
    })
    // Command: launch VS Code for manual inspection, then close after a delay.
    .command({
        command: 'launch',
        describe: 'Launches VS Code',
        builder: (args: yargs.Argv) =>
            args
                .option('channel', channelOption)
                .option('destination', destinationOption)
                .option('verbose', enableVerboseLogging)
                .option('timeout', {
                    alias: 't',
                    describe: 'Timeout (ms) before closing VS Code',
                    default: 5 * 60 * 1_000
                }),
        handler: async argv => {
            initialize(argv.verbose);
            const options = getTestOptions(argv.channel, path.resolve(argv.destination), 'python', argv.verbose);
            const app = new Application(options);
            info(app.channel);
            await (app.options as TestOptions).initilize();
            await app
                .start()
                .then(() => info('VS Code successfully launched'))
                .catch(console.error.bind(console, 'Failed to launch VS Code'));
            await waitForPythonExtensionToActivate(60_000, app);
            // NOTE(review): fixed 100s wait before closing the editor — appears to be a
            // hard-coded hack; the `timeout` option declared above is not used here.
            await sleep(100_000);
            await app.quickopen.runCommand('View: Close Editor');
        }
    })
    // Command: run the cucumber UI test suite.
    .command({
        command: 'test',
        describe: "Runs the UI Tests (Arguments after '--' are cucumberjs args)",
        builder: (args: yargs.Argv) =>
            args
                .option('channel', channelOption)
                .option('destination', destinationOption)
                .option('verbose', enableVerboseLogging)
                .option('pythonPath', {
                    describe: 'Destination for download path',
                    default: process.env.CI_PYTHON_PATH || 'python'
                })
                .example('test', ' # (Runs all tests in stable)')
                .example('test', '--channel=insider # (Runs all tests in insiders)')
                .example('test', '--channel=insider --pythonPath=c:/python/python.exe # (Runs all tests in insiders)')
                .example('test', "-- --tags=@wip # (Runs tests in stable with with tags @wip. Arguments after '--' are cucumberjs args.)")
                .example('test', "-- --tags='@smoke and @terminal' # (Runs tests in stable with tags '@smoke and @terminal')"),
        handler: async argv => {
            initialize(argv.verbose);
            const cucumberArgs = argv._.slice(1);
            // Resolve the bare 'python' command to an absolute interpreter path.
            const pythonPath =
                argv.pythonPath === 'python'
                    ? cp
                          .execSync('python -c "import sys;print(sys.executable)"')
                          .toString()
                          .trim()
                    : argv.pythonPath;
            await start(argv.channel, path.resolve(argv.destination), argv.verbose, pythonPath, cucumberArgs).catch(ex => {
                console.error('UI Tests Failed', ex);
                process.exit(1); // Required for CLI to fail on CI servers.
            });
        }
    })
    // Command: merge cucumber JSON reports and emit a combined HTML report.
    .command({
        command: 'report',
        describe: 'Merges multiple cucumber JSON reports and generates a single HTML report',
        builder: (args: yargs.Argv) =>
            args
                .option('jsonDir', {
                    describe: 'Directory containing the Cucumber JSON reports',
                    demandOption: true
                })
                .option('htmlOutput', {
                    describe: 'Target directory for HTML report',
                    default: path.join(process.cwd(), '.vscode test', 'reports')
                }),
        handler: argv => mergeAndgenerateHtmlReport(argv.jsonDir as string, argv.htmlOutput)
    })
    // Command: list step definitions (currently just dumps the parsed args).
    .command({
        command: 'steps',
        describe: 'List all of the Steps (with arguments and all usages)',
        builder: (args: yargs.Argv) =>
            args
                .option('format', {
                    describe: 'Where should the steps be displayed as plain text or JSON',
                    default: 'text',
                    choices: ['text', 'json']
                })
                .option('file', {
                    describe: 'Whether to print output to a file'
                })
                .example('steps', '# Lists all steps'),
        handler: argv => {
            console.log('test', argv);
        }
    })
    .demandCommand()
    .help()
    .version(false).argv;
// argv needs to be retained by compiler.
// Hence we need a bogus use of the .argv value.
if (parsedArgs._.length === 0) {
    console.log(parsedArgs);
}

322
uitests/src/selectors.ts Normal file
Просмотреть файл

@ -0,0 +1,322 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import { pyBootstrapActivatedStatusBarTooltip, pyBootstrapTooltip } from './constants';
import { Channel } from './types';
export enum Selector {
    /**
     * Selector for the Bootstrap extensions statubar item.
     */
    'PyBootstrapStatusBar',
    /**
     * Selector for Python extension statusbar item .
     */
    'PythonExtensionStatusBar',
    /**
     * Selector for the VSC statubar item displaying the line & column.
     * This is the item on the bottom right e.g. `Ln 12, Col 56`.
     */
    'ColumnLineNumbnerStatusBar',
    /**
     * Selector for the statusbar created by Bootstrap extensions when Python Extension gets activated.
     * (basically if this status bar item exists, then Python Extension has activated).
     */
    'PyBootstrapActivatedStatusBar',
    /**
     * Selector for our custom statubar item (for the uitests) displaying the line & column.
     * This is the item on the bottom left display line & column as `12,4`.
     */
    'CurrentEditorLineColumnStatusBar',
    /**
     * Selector for Explorer Activity Bar
     */
    'ExplorerActivityBar',
    /**
     * Selector for Debug Activity Bar
     */
    'DebugActivityBar',
    /**
     * Input in the dropdown of the Debug Configuration picker.
     */
    'DebugConfigurationPickerDropDownInput',
    /**
     * The visibility of this indicates the debugger has started.
     */
    'DebugToolbar',
    /**
     * Selector for an icon in the debug toolbar
     */
    'DebugToolbarIcon',
    /**
     * Selector for the command/control maximizing the bottom panel.
     */
    'MaximizePanel',
    /**
     * Selector for the command/control restoring (minimizing) the bottom panel.
     */
    'MinimizePanel',
    /**
     * Selector for individual lines in the visible output panel.
     */
    'IndividualLinesInOutputPanel',
    /**
     * Individual notification.
     */
    'Notification',
    /**
     * Individual notification (type = error).
     */
    'NotificationError',
    /**
     * Selector for a single notification entry (used with an nth-of-type index).
     */
    'IndividualNotification',
    /**
     * Message displayed in the nth Individual notification.
     */
    'NthNotificationMessage',
    /**
     * The (x) for the nth Individual notification.
     */
    'CloseButtonInNthNotification',
    /**
     * The selector for a button in the nth Individual notification.
     */
    'ButtonInNthNotification',
    /**
     * The number of problems (this is a number next to `Problems` text in the panel).
     */
    'ProblemsBadge',
    /**
     * Selector to check whether problems panel is visible.
     */
    'ProblemsPanel',
    /**
     * Selector for the file name in a problem in the problems panel.
     */
    'FileNameInProblemsPanel',
    /**
     * Selector for the problem message in a problem in the problems panel.
     */
    'ProblemMessageInProblemsPanel',
    /**
     * Quick input container
     */
    'QuickInput',
    /**
     * Input box in the Quick Input
     */
    'QuickInputInput',
    /**
     * Input box in the quick open dropdown
     */
    'QuickOpenInput',
    /**
     * Selector for when quick open has been hidden.
     */
    'QuickOpenHidden',
    /**
     * Selector for individual items displayed in the quick open dropdown
     */
    'QuickOpenEntryLabel',
    /**
     * Selector for the line label of the focused quick open entry.
     */
    'QuickOpenEntryLineLabel',
    /**
     * Selector for individual items that are focused and displayed in the quick open dropdown
     */
    'QuickOpenEntryLabelFocused',
    /**
     * Alternate selector for the focused quick open entry (fallback for differing DOM).
     */
    'QuickOpenEntryLabelFocused2',
    /**
     * Selector for the test activitybar/test explorer.
     */
    'TestActivityBar',
    /**
     * Selector to check visibility of the test explorer icon in the activity bar.
     */
    'TestActivityIcon',
    /**
     * Icon in toolbar of test explorer.
     */
    'TestExplorerToolbarcon',
    /**
     * Selector for Side bar.
     */
    'SideBar',
    /**
     * Selector for a node in the test explorer.
     */
    'TestExplorerNode',
    /**
     * Selector for the nth node in the test explorer.
     */
    'NthTestExplorerNode',
    /**
     * Selector for a label in the nth node of a test explorer.
     */
    'NthTestExplorerNodeLabel',
    /**
     * Selector for the icon in the nth node of a test explorer.
     * Used to get details of the icon (backgroundImage) of the displayed icon.
     */
    'NthTestExplorerNodeIcon',
    /**
     * Selector for the treeview container of the test explorer.
     * This is used to set focus to the test explorer tree view and press keys for navigation in tree view.
     */
    'TestExplorerTreeViewContainer',
    /**
     * Selector for the items in the auto completion list.
     */
    'AutoCompletionListItem'
}
// Selector for container of notifications (the toast area in the bottom-right of VS Code).
const messageBoxContainer = 'div.notifications-toasts.visible div.notification-toast-container';
// Selector for an individual notification message (the text span inside a toast).
const messageBoxSelector = `${messageBoxContainer} div.notification-list-item-message span`;
// Selector for the quick open dropdown widget (command palette / file picker).
const quickOpen = 'div.monaco-quick-open-widget';
// Selectors related to the Quick Open dropdown widget (command palette / file picker).
// tslint:disable: no-unnecessary-class
export class QuickOpen {
    /** Selector for the quick open widget container. */
    public static QUICK_OPEN = 'div.monaco-quick-open-widget';
    /** Selector matching the quick open widget only when it is hidden. */
    public static QUICK_OPEN_HIDDEN = 'div.monaco-quick-open-widget[aria-hidden="true"]';
    /** Selector for the input textbox inside the quick open widget. */
    public static QUICK_OPEN_INPUT = `${QuickOpen.QUICK_OPEN} .quick-open-input input`;
    /** Selector for the highlighted label of the currently focused row in the dropdown. */
    public static QUICK_OPEN_FOCUSED_ELEMENT = `${QuickOpen.QUICK_OPEN} .quick-open-tree .monaco-tree-row.focused .monaco-highlighted-label`;
    /** Selector for an individual entry in the quick open dropdown. */
    public static QUICK_OPEN_ENTRY_SELECTOR = 'div[aria-label="Quick Picker"] .monaco-tree-rows.show-twisties .monaco-tree-row .quick-open-entry';
    /** Selector for the label of an individual entry in the quick open dropdown. */
    public static QUICK_OPEN_ENTRY_LABEL_SELECTOR = 'div[aria-label="Quick Picker"] .monaco-tree-rows.show-twisties .monaco-tree-row .quick-open-entry .label-name';
    /** Selector for the highlighted line label of the focused entry in the quick open dropdown. */
    public static QUICK_OPEN_ENTRY_LINE_LABEL_SELECTOR =
        'div[aria-label="Quick Picker"] .monaco-tree-rows.show-twisties .monaco-tree-row.focused .quick-open-entry .monaco-label-description-container .label-name .monaco-highlighted-label span';
}
// Selectors related to the Quick Input widget (input boxes shown for e.g. "Go to line").
class QuickInput {
    /** Selector for the quick input widget container. */
    public static QUICK_INPUT = '.quick-input-widget';
    /** Selector for the input textbox inside the quick input widget. */
    public static QUICK_INPUT_INPUT = `${QuickInput.QUICK_INPUT} .quick-input-box input`;
    /** Selector for the highlighted label of the focused row shown with the quick input. */
    public static QUICK_INPUT_FOCUSED_ELEMENT = `${QuickInput.QUICK_INPUT} .quick-open-tree .monaco-tree-row.focused .monaco-highlighted-label`;
}
// Map of every `Selector` to its CSS query.
// Each entry always has a `stable` selector; an `insider` selector is provided only
// where the VS Code Insiders DOM differs (all other lookups fall back to `stable`,
// see `getSelector` below).
// Placeholders such as `{0}`/`{1}` are substituted by callers before use.
const selectors: Record<Selector, { stable: string } & { insider?: string }> = {
    [Selector.PythonExtensionStatusBar]: {
        stable: ".statusbar-item[id='ms-python.python']"
    },
    [Selector.PyBootstrapStatusBar]: {
        stable: `.part.statusbar *[title='${pyBootstrapTooltip}'] a`
    },
    [Selector.PyBootstrapActivatedStatusBar]: {
        stable: `.part.statusbar *[title='${pyBootstrapActivatedStatusBarTooltip}'] a`
    },
    [Selector.CurrentEditorLineColumnStatusBar]: {
        stable: ".part.statusbar *[title='PyLine'] a"
    },
    // NOTE(review): enum member name contains a typo ('Numbner'); left as-is since it is referenced elsewhere.
    [Selector.ColumnLineNumbnerStatusBar]: {
        stable: 'div.statusbar-item[title="Go to Line"] a'
    },
    [Selector.ExplorerActivityBar]: {
        stable: '.composite.viewlet.explorer-viewlet'
    },
    [Selector.DebugActivityBar]: {
        stable: '.composite.viewlet.debug-viewlet'
    },
    [Selector.DebugToolbar]: {
        stable: 'div.debug-toolbar'
    },
    [Selector.DebugToolbarIcon]: {
        stable: 'div.debug-toolbar .action-item .action-label.icon'
    },
    [Selector.DebugConfigurationPickerDropDownInput]: {
        stable: '.quick-input-widget .quick-input-title'
    },
    // Maximize/minimize differ between stable and insiders (different titles/classes).
    [Selector.MaximizePanel]: {
        stable: '.part.panel.bottom a.action-label.maximize-panel-action[title="Toggle Maximized Panel"]',
        insider: '.part.panel.bottom a.action-label[title="Maximize Panel Size"]'
    },
    [Selector.MinimizePanel]: {
        stable: '.part.panel.bottom a.action-label.minimize-panel-action[title="Restore Panel Size"]',
        insider: '.part.panel.bottom a.action-label[title="Restore Panel Size"]'
    },
    [Selector.IndividualLinesInOutputPanel]: {
        stable: '.part.panel.bottom .view-lines .view-line span span'
    },
    [Selector.Notification]: {
        stable: '.notifications-toasts.visible .notification-toast-container .notification-list-item.expanded'
    },
    [Selector.NotificationError]: {
        stable: '.notifications-toasts.visible .notification-toast-container .notification-list-item.expanded .notification-list-item-icon.icon-error'
    },
    // `{0}` is the 1-based index of the notification toast.
    [Selector.NthNotificationMessage]: {
        stable: '.notifications-toasts.visible .notification-toast-container:nth-child({0}) .notification-list-item.expanded div.notification-list-item-message span'
    },
    [Selector.IndividualNotification]: {
        stable: messageBoxSelector
    },
    [Selector.CloseButtonInNthNotification]: {
        stable: '.notifications-toasts.visible .notification-toast-container:nth-child({0}) .notification-list-item.expanded .action-label.icon.clear-notification-action'
    },
    // `{0}` is the notification index, `{1}` is the button caption.
    [Selector.ButtonInNthNotification]: {
        stable: ".notifications-toasts.visible .notification-toast-container:nth-child({0}) .notification-list-item.expanded .monaco-button.monaco-text-button[title='{1}']"
    },
    [Selector.ProblemsBadge]: {
        stable: '.part.panel.bottom .action-item.checked .badge-content'
    },
    [Selector.FileNameInProblemsPanel]: {
        stable: '.part.panel.bottom .content .tree-container .monaco-tl-row .file-icon .label-name span span'
    },
    [Selector.ProblemMessageInProblemsPanel]: {
        stable: '.part.panel.bottom .content .tree-container .monaco-tl-row .marker-message-details'
    },
    [Selector.QuickOpenInput]: {
        stable: `${quickOpen} .quick-open-input input`
    },
    [Selector.QuickOpenEntryLabel]: {
        stable: 'div[aria-label="Quick Picker"] .monaco-tree-rows.show-twisties .monaco-tree-row .quick-open-entry .label-name'
    },
    [Selector.QuickOpenEntryLabelFocused]: {
        stable: 'div[aria-label="Quick Picker"] .monaco-tree-rows.show-twisties .monaco-tree-row.focused .quick-open-entry .label-name .monaco-highlighted-label .highlight'
    },
    [Selector.QuickOpenEntryLineLabel]: {
        stable: QuickOpen.QUICK_OPEN_ENTRY_LINE_LABEL_SELECTOR
    },
    [Selector.QuickOpenEntryLabelFocused2]: {
        stable: '.monaco-tree-row.focused .monaco-icon-label-description-container .monaco-highlighted-label'
    },
    [Selector.QuickInputInput]: {
        stable: QuickInput.QUICK_INPUT_INPUT
    },
    [Selector.QuickInput]: {
        stable: QuickInput.QUICK_INPUT
    },
    [Selector.TestActivityBar]: {
        stable: '.composite.viewlet[id="workbench.view.extension.test"]'
    },
    [Selector.TestActivityIcon]: {
        stable: ".activitybar.left .actions-container a[title='Test']"
    },
    // `{0}` is the title of the toolbar icon.
    [Selector.TestExplorerToolbarcon]: {
        stable: "div[id='workbench.parts.sidebar'] .action-item a[title='{0}']"
    },
    [Selector.SideBar]: {
        stable: "div[id='workbench.parts.sidebar']"
    },
    // In the following, `{0}` is the 1-based index of the tree node.
    [Selector.NthTestExplorerNodeLabel]: {
        stable: 'div[id="workbench.view.extension.test"] div.monaco-tree-row:nth-child({0}) a.label-name'
    },
    [Selector.NthTestExplorerNodeIcon]: {
        stable: 'div[id="workbench.view.extension.test"] div.monaco-tree-row:nth-child({0}) .custom-view-tree-node-item-icon'
    },
    [Selector.NthTestExplorerNode]: {
        stable: 'div[id="workbench.view.extension.test"] div.monaco-tree-row:nth-child({0})'
    },
    [Selector.TestExplorerNode]: {
        stable: 'div[id="workbench.view.extension.test"] .tree-explorer-viewlet-tree-view div.monaco-tree-row'
    },
    [Selector.TestExplorerTreeViewContainer]: {
        stable: "div[id='workbench.view.extension.test'] .monaco-tree"
    },
    [Selector.QuickOpenHidden]: {
        stable: QuickOpen.QUICK_OPEN_HIDDEN
    },
    [Selector.AutoCompletionListItem]: {
        stable: '.editor-widget.suggest-widget.visible .monaco-list-row a.label-name .monaco-highlighted-label'
    },
    [Selector.ProblemsPanel]: {
        stable: '.part.panel.bottom .composite.panel.markers-panel'
    }
};
/**
 * Returns the CSS selector for the given `Selector`, preferring the
 * channel-specific variant (e.g. `insider`) when one is defined and falling
 * back to the `stable` selector otherwise.
 *
 * @param selector The logical selector to look up.
 * @param channel The VS Code channel currently under test.
 */
export function getSelector(selector: Selector, channel: Channel): string {
    const entry = selectors[selector];
    return entry[channel] || entry.stable;
}

Просмотреть файл

@ -0,0 +1,36 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import { spawnSync } from 'child_process';
import * as fs from 'fs-extra';
import * as path from 'path';
import { uitestsRootPath } from '../constants';
import { debug } from '../helpers/logger';
/**
 * Gets the path to the bootstrap extension (VSIX), building it with `vsce` when
 * it has not been built yet. An existing `bootstrap.vsix` is reused as-is.
 *
 * @export
 * @returns Fully qualified path to the bootstrap VSIX.
 */
export async function getExtensionPath(): Promise<string> {
    const sourceDir = path.join(uitestsRootPath, 'bootstrap');
    const extensionPath = path.join(sourceDir, 'bootstrap.vsix');
    if (await fs.pathExists(extensionPath)) {
        debug(`Reusing existing bootstrap extension ${extensionPath}`);
        return extensionPath;
    }
    debug(`Building bootstrap extension ${extensionPath}`);
    const args = ['vsce', 'package', '--out', extensionPath];
    // spawnSync is synchronous and does not throw: a failure to launch `npx`
    // is reported via `error`, and a failed packaging via a non-zero `status`.
    // (The original code checked neither, so a failed build could resolve successfully.)
    const result = spawnSync('npx', args, { cwd: path.join(sourceDir, 'extension') });
    if (result.error) {
        throw new Error(`Failed to launch 'npx ${args.join(' ')}'. Error: ${result.error.message}`);
    }
    const stdErr = (result.stderr || '').toString().trim();
    if (result.status !== 0 || stdErr.length > 0) {
        throw new Error(`Failed to build bootstrap extension. Exit code: ${result.status}. Error: ${stdErr}`);
    }
    debug(`Built bootstrap extension ${extensionPath}`);
    return extensionPath;
}

Просмотреть файл

@ -0,0 +1,130 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import * as fs from 'fs-extra';
import * as path from 'path';
import * as tmp from 'tmp';
import { downloadFile, getOSType, OSType, unzipVSCode } from '../helpers';
import { info } from '../helpers/logger';
import { Channel } from '../types';
/**
 * Maps the current node platform onto the platform identifier used in the
 * VS Code download URL. Anything other than macOS/Windows is treated as linux-x64.
 */
function getDownloadPlatform() {
    const platformMap: Record<string, string> = {
        darwin: 'darwin',
        win32: 'win32-archive'
    };
    return platformMap[process.platform] || 'linux-x64';
}
// Maps the `Channel` union onto the channel segment of the VS Code download URL.
// Currently an identity mapping; kept explicit so the URL format is decoupled from our type.
const DownloadChannel = {
    stable: 'stable',
    insider: 'insider'
};
/**
* Gets the download url for VS Code.
* Its possible to hard code the VS Code version number in here for stable versions of VS Code.
* This would be useful to ensure our CI tests always pass.
* E.g. if VSC updates CSS in insiders and release a new version tomorrow, then if we haven't had
* the time to account for the CSS changes, then UI tests will fail.
* Solution is to tie the UI tests to a specific version of VS Code.
*
* @export
* @param {Channel} channel
* @returns
*/
export async function getVSCodeDownloadUrl(channel: Channel) {
const downloadPlatform = getDownloadPlatform();
return `https://update.code.visualstudio.com/latest/${downloadPlatform}/${DownloadChannel[channel]}`;
}
/**
 * Returns the path to the VS Code executable for the given channel within the test directory.
 * NOTE(review): currently byte-identical in behavior to `getVSCodeElectronPath`;
 * the two are kept separate in case the executable and electron paths diverge per platform.
 *
 * @param channel VS Code channel ('stable' or 'insider').
 * @param testDir Root directory where VS Code was extracted.
 */
export function getVSCodeExecutablePath(channel: Channel, testDir: string) {
    const isStable = channel === 'stable';
    if (process.platform === 'win32') {
        return path.join(testDir, channel, isStable ? 'Code.exe' : 'Code - Insiders.exe');
    }
    if (process.platform === 'darwin') {
        return path.join(testDir, channel, isStable ? 'Visual Studio Code.app/Contents/MacOS/Electron' : 'Visual Studio Code - Insiders.app/Contents/MacOS/Electron');
    }
    return path.join(testDir, channel, isStable ? 'VSCode-linux-x64/code' : 'VSCode-linux-x64/code-insiders');
}
/**
 * Returns the path to the VS Code Electron executable.
 *
 * @export
 * @param {Channel} channel
 * @param {string} testDir
 * @returns Fully qualified path to the electron binary for the channel.
 */
export function getVSCodeElectronPath(channel: Channel, testDir: string) {
    let executable: string;
    if (process.platform === 'win32') {
        executable = channel === 'stable' ? 'Code.exe' : 'Code - Insiders.exe';
    } else if (process.platform === 'darwin') {
        executable = channel === 'stable' ? 'Visual Studio Code.app/Contents/MacOS/Electron' : 'Visual Studio Code - Insiders.app/Contents/MacOS/Electron';
    } else {
        executable = channel === 'stable' ? 'VSCode-linux-x64/code' : 'VSCode-linux-x64/code-insiders';
    }
    return path.join(testDir, channel, executable);
}
/**
 * Returns the root directory of the VS Code application.
 *
 * @export
 * @param {Channel} channel
 * @param {string} testDir
 * @returns Directory containing the VS Code installation for the channel.
 */
export function getVSCodeDirectory(channel: Channel, testDir: string) {
    if (process.platform === 'win32') {
        // On Windows the archive extracts directly into the channel directory.
        return path.join(testDir, channel);
    }
    if (process.platform === 'darwin') {
        return path.join(testDir, channel, channel === 'stable' ? 'Visual Studio Code.app' : 'Visual Studio Code - Insiders.app');
    }
    // Linux: the directory name is the same for both channels (the original code
    // had a redundant ternary with two identical arms here); the binaries inside differ.
    return path.join(testDir, channel, 'VSCode-linux-x64');
}
/**
 * Download destination for VS Code.
 * If the channel is stable, this is typically of the form `./.vscode test/stable`,
 * else `./.vscode test/insider`, where `.vscode test` is the value of `testDir`.
 *
 * @param {Channel} channel
 * @param {string} testDir
 * @returns Directory into which VS Code is downloaded/extracted.
 */
function getVSCodeDestinationDirectory(channel: Channel, testDir: string) {
    const channelDirName = channel === 'stable' ? 'stable' : 'insider';
    return path.join(testDir, channelDirName);
}
// Checks whether VS Code for the given channel has already been downloaded and
// extracted into the test directory (the destination directory exists).
async function hasVSCBeenDownloaded(channel: Channel, testDir: string) {
    const destination = getVSCodeDestinationDirectory(channel, testDir);
    return fs.pathExists(destination);
}
export async function downloadVSCode(channel: Channel, testDir: string) {
if (await hasVSCBeenDownloaded(channel, testDir)) {
info('VS Code already downloaded.');
return;
}
const targetDir = getVSCodeDestinationDirectory(channel, testDir);
const url = await getVSCodeDownloadUrl(channel);
const ostype = getOSType();
const filePostfix = ostype === OSType.Linux ? 'vscode.tar.gz' : 'vscode.zip';
const targetFile = await new Promise<string>((resolve, reject) => {
tmp.tmpName({ postfix: filePostfix }, (ex, fileName) => {
if (ex) {
return reject(ex);
}
resolve(fileName);
});
});
await downloadFile(url, targetFile, `Downloading VS Code ${channel === 'stable' ? 'Stable' : 'Insider'}`);
await unzipVSCode(targetFile, targetDir);
info('VS Code successfully downloaded.');
}

324
uitests/src/setup/driver.ts Normal file
Просмотреть файл

@ -0,0 +1,324 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import { ChildProcess } from 'child_process';
import { EventEmitter } from 'events';
import { Browser, ClickOptions, ElementHandle, launch, Page, UnwrapElementHandle, WrapElementHandle } from 'puppeteer-core';
import { URI } from 'vscode-uri';
import { isCI } from '../constants';
import { noop, RetryOptions, retryWrapper, sleep } from '../helpers';
import { debug, warn } from '../helpers/logger';
import { getSelector, Selector } from '../selectors';
import { ElementsSelectorPredicate, IDriver, ITestOptions, SelectorRetryOptions, WaitForSelectorOptions, WaitForSelectorOptionsHidden } from '../types';
import { getVSCodeElectronPath } from './downloader';
// Time to wait for the UI to react to user typing in a textbox.
// If it is too low, the VS Code UI won't have enough time to react to the keys being typed into input boxes.
// 100ms seems to be the sweet spot (any slower and the UI tests become slow);
// 50ms might be enough as well, but 100ms is known to work.
// NOTE(review): the identifier contains a typo ('Typging'); left unchanged as it is referenced throughout this module.
const waitTimeoutAfterTypging = 100;
/*
Hacky way to translate control/special keys into the key names puppeteer expects.
A better approach would be to wrap this up in a class (plenty of places to get
inspiration from: .NET, Java, Flex, etc.). The current approach is quite sloppy:
several aliases map to the same puppeteer key (e.g. 'esc'/'escape'/'Escape').
Keys not present here are passed through unchanged by `normalizeKey`.
*/
const KeyTranslations: Record<string, string> = {
    alt: 'Alt',
    control: 'Control',
    ctrl: 'Control',
    shift: 'Shift',
    space: 'Space',
    Escape: 'Escape',
    escape: 'Escape',
    esc: 'Escape',
    Enter: 'Enter',
    enter: 'Enter',
    down: 'ArrowDown',
    right: 'ArrowRight',
    left: 'ArrowLeft',
    tab: 'Tab'
};
/**
 * Given a key (control key or standard alphanumeric character),
 * convert it into a key understood by puppeteer.
 * Keys without a translation are returned unchanged.
 *
 * @param {string} key
 * @returns {string}
 */
function normalizeKey(key: string): string {
    // Use an own-property check: the previous `key in KeyTranslations` test also
    // matched properties inherited from Object.prototype (e.g. 'toString',
    // 'constructor'), which would have returned a function instead of a string.
    return Object.prototype.hasOwnProperty.call(KeyTranslations, key) ? KeyTranslations[key] : key;
}
/**
 * This is what loads VS Code.
 * VS Code is launched using puppeteer, which provides the ability to run CSS queries
 * against the DOM and perform UI actions.
 * This is the heart of the UI test.
 *
 * @export
 * @class Driver
 * @extends {EventEmitter}
 * @implements {IDriver}
 */
export class Driver extends EventEmitter implements IDriver {
    /** Whether the VS Code (electron) process has been started and not yet killed. */
    public get isAlive(): boolean {
        return this.process && !this.process.killed ? true : false;
    }
    // Underlying electron process hosting VS Code (undefined until `start` is called).
    private process?: ChildProcess;
    // Puppeteer browser connected to the launched electron instance.
    private browser!: Browser;
    // All pages exposed by the browser; the first page is treated as the main VS Code window.
    private pages!: Page[];
    // The main VS Code window; all queries and actions are performed against this page.
    private mainPage!: Page;
    private readonly options: ITestOptions;
    constructor(options: ITestOptions) {
        super();
        this.options = options;
    }
    /**
     * Given the `SelectorRetryOptions`, and an error message, convert it into `RetryOptions`.
     * This will be used to retry querying the UI using the `retryWrapper` or `retry` decorator.
     *
     * @private
     * @static
     * @param {SelectorRetryOptions} options
     * @param {string} fallbackErrorMessage
     * @returns {RetryOptions}
     * @memberof Driver
     */
    private static toRetryOptions(options: SelectorRetryOptions, fallbackErrorMessage: string): RetryOptions {
        // Options are either timeout-based or count-based; the presence of
        // `retryTimeout` distinguishes the two variants.
        if ('retryTimeout' in options) {
            return {
                timeout: options.retryTimeout,
                errorMessage: options.errorMessage || fallbackErrorMessage,
                logFailures: options.logFailures
            };
        } else {
            return {
                count: options.retryCount,
                errorMessage: options.errorMessage || fallbackErrorMessage,
                logFailures: options.logFailures
            };
        }
    }
    /**
     * Starts VS Code.
     * Kills any previously-started instance first, launches electron through puppeteer,
     * then waits for the bootstrap extension's status bar item to appear (the signal
     * that VS Code is ready for user interaction).
     *
     * @returns {Promise<void>}
     * @memberof Driver
     */
    public async start(): Promise<void> {
        if (this.process) {
            debug('Killing existing instance before starting VS Code');
            await this.exit().catch(warn);
        }
        const electronPath = getVSCodeElectronPath(this.options.channel, this.options.testPath);
        // If on CI, run in headless mode.
        const ciArgs = isCI ? ['--headless'] : [];
        const args = [
            ...ciArgs,
            `--user-data-dir=${this.options.userDataPath}`,
            `--extensions-dir=${this.options.extensionsPath}`,
            '--skip-getting-started',
            '--skip-release-notes',
            '--sticky-quickopen',
            '--disable-telemetry',
            '--disable-updates',
            '--disable-crash-reporter',
            '--no-sandbox',
            '--no-first-run',
            '--disable-dev-shm-usage',
            '--disable-setuid-sandbox',
            `--folder-uri=${URI.file(this.options.workspacePathOrFolder)}`
        ];
        debug(`Launching via puppeteer with electron path ${electronPath} & args ${args.join('\n')}`);
        this.browser = await launch({
            executablePath: electronPath,
            args,
            headless: true,
            devtools: false,
            // This must be set to `null`, else VSC UI resizes in a funky way.
            // tslint:disable-next-line: no-null-keyword
            defaultViewport: null,
            // This must be set to ensure puppeteer doesn't send default (additional) args.
            ignoreDefaultArgs: true
        });
        this.process = this.browser.process();
        // Re-emit the process 'exit' event as our own so consumers can react to VS Code dying.
        this.process.on('exit', this.emit.bind(this, 'exit'));
        debug(`Launched with process ${this.process.pid}`);
        this.pages = await this.browser.pages();
        this.pages.forEach(page => {
            page.on('error', error => warn('One of the pages have errored', error));
        });
        this.mainPage = this.pages[0];
        // We know it will take at least 1 second, so lets wait for 1 second, no point trying before then.
        await sleep(1000);
        // Wait for bootstrap extension to load (when this extension is ready, that means VSC is ready for user interaction).
        // Based on the assumption that if extensions have been activated, then VSC is ready for user interaction.
        // Note: This extension loads very quickly (nothing in its activation method to slow activation down).
        debug('Wait for bootstrap extension to actiavte');
        await this.waitForSelector(getSelector(Selector.PyBootstrapStatusBar, this.options.channel), {
            timeout: 15_000,
            visible: true
        });
        debug('VS Code successfully launched');
    }
    /**
     * Captures a screenshot of the main VS Code window into the given file.
     */
    public async captureScreenshot(filename: string): Promise<Buffer> {
        return this.mainPage.screenshot({ path: filename });
    }
    /**
     * Shuts down VS Code: closes the puppeteer browser and, as a fallback,
     * kills the underlying electron process. Safe to call when not started.
     */
    public async exit(): Promise<void> {
        if (!this.process) {
            return;
        }
        this.removeAllListeners();
        debug('Shutting down vscode driver');
        await this.browser.close().catch(warn);
        try {
            if (this.process.connected && this.process) {
                // If exiting failed, kill the underlying process.
                process.kill(this.process.pid);
            }
        } catch {
            noop();
        }
        this.process = undefined;
    }
    public async waitForSelector(selector: string, options?: WaitForSelectorOptions): Promise<ElementHandle>;
    public async waitForSelector(selector: string, options?: WaitForSelectorOptionsHidden): Promise<ElementHandle | undefined>;
    /**
     * Waits for the given CSS selector to appear (visible by default) or, when
     * `hidden: true` is passed, to disappear. Default timeout is 3000ms unless overridden.
     */
    public async waitForSelector(
        selector: string,
        options?: WaitForSelectorOptions | WaitForSelectorOptionsHidden
        // tslint:disable-next-line: no-any
    ): Promise<any> {
        if (options && 'hidden' in options && options.hidden === true) {
            // We expect selector to be available.
            return this.mainPage.waitForSelector(selector, { timeout: 3000, ...options });
        }
        // We expect selector to be available.
        return this.mainPage.waitForSelector(selector, { visible: true, timeout: 3000, ...options });
    }
    /**
     * Finds a single element for the given selector.
     * Without retry options this rejects immediately when not found;
     * with retry options the query is retried per the options.
     */
    // tslint:disable-next-line: no-any
    public async $(selector: string, options?: SelectorRetryOptions): Promise<any> {
        if (!options) {
            return this.mainPage.$(selector).then(ele => (ele ? Promise.resolve(ele) : Promise.reject(new Error(`Element not found with selector '${selector}'`))));
        }
        const wrapper = async (): Promise<ElementHandle> => {
            const ele = await this.mainPage.$(selector);
            if (ele) {
                return ele;
            }
            debug(`Element not found for selector '${selector}', will retry.`);
            throw new Error('Element not found, keep retrying');
        };
        return retryWrapper(Driver.toRetryOptions(options, `Failed to find for selector '${selector}'`), wrapper);
    }
    /**
     * Finds all elements for the given selector, optionally filtered by a predicate.
     * With retry options, keeps retrying until at least one (filtered) element is found.
     */
    public async $$(selector: string, options?: SelectorRetryOptions & { predicate?: ElementsSelectorPredicate }): Promise<ElementHandle[]> {
        if (!options) {
            return this.mainPage.$$(selector);
        }
        const wrapper = async (): Promise<ElementHandle[]> => {
            let eles = await this.mainPage.$$(selector);
            if (eles.length > 0 && options.predicate) {
                eles = options.predicate(eles);
            }
            if (eles.length > 0) {
                return eles;
            }
            debug(`Elements not found for selector '${selector}', will retry.`);
            throw new Error('Elements not found, keep retrying');
        };
        return retryWrapper(Driver.toRetryOptions(options, `Failed to find for selector '${selector}'`), wrapper);
    }
    public $eval<R>(selector: string, pageFunction: (element: Element) => R | Promise<R>): Promise<WrapElementHandle<R>>;
    public $eval<R, X1>(selector: string, pageFunction: (element: Element, x1: UnwrapElementHandle<X1>) => R | Promise<R>, x1: X1): Promise<WrapElementHandle<R>>;
    // Evaluates `pageFunction` in the page context against the first element matching `selector`.
    // tslint:disable-next-line: no-any
    public $eval(selector: any, pageFunction: any, x1?: any) {
        // Only forward the extra argument when the 3-arg overload was used.
        if (arguments.length === 3) {
            return this.mainPage.$eval(selector, pageFunction, x1);
        }
        return this.mainPage.$eval(selector, pageFunction);
    }
    public $$eval<R>(selector: string, pageFunction: (elements: Element[]) => R | Promise<R>): Promise<WrapElementHandle<R>>;
    public $$eval<R, X1>(selector: string, pageFunction: (elements: Element[], x1: UnwrapElementHandle<X1>) => R | Promise<R>, x1: X1): Promise<WrapElementHandle<R>>;
    // Evaluates `pageFunction` in the page context against all elements matching `selector`.
    // tslint:disable-next-line: no-any
    public $$eval(selector: any, pageFunction: any, x1?: any) {
        return this.mainPage.$$eval(selector, pageFunction, x1);
    }
    /**
     * Clicks the element matching the selector, retrying when retry options are provided.
     */
    public async click(selector: string, options?: ClickOptions & SelectorRetryOptions): Promise<void> {
        if (!options || (!('retryTimeout' in options) && !('retryCount' in options))) {
            return this.mainPage.click(selector, options);
        }
        const wrapper = async (): Promise<void> => {
            // Click will throw an error if selector is invalid or element is not found.
            await this.mainPage.click(selector, options).catch(ex => {
                debug(`Element not found for selector '${selector}', will retry.`);
                return Promise.reject(ex);
            });
        };
        return retryWrapper(Driver.toRetryOptions(options, `Failed to click for selector '${selector}'`), wrapper);
    }
    /**
     * Sets focus to the element matching the selector (waiting for it to exist first).
     */
    public async focus(selector: string): Promise<void> {
        // Ensure element exists before setting focus.
        await this.waitForSelector(selector, { timeout: 500, visible: true });
        return this.mainPage.focus(selector);
    }
    /**
     * Hovers over the element matching the selector (waiting for it to exist first).
     */
    public async hover(selector: string): Promise<void> {
        // Ensure element exists before hovering over it.
        await this.waitForSelector(selector, { timeout: 500, visible: true });
        return this.mainPage.hover(selector);
    }
    /**
     * Types text into the element matching the selector (focusing it first),
     * then waits briefly for the UI to react to the input.
     */
    public async type(selector: string, text: string, options?: { delay: number }): Promise<void> {
        // Focus the element before typing into it.
        await this.focus(selector);
        await this.mainPage.type(selector, text, options);
        // Wait for text to be typed in (sometimes having this delay helps).
        // Not doing this sometimes results in the value not being entered in the input box.
        // Hopefully we don't need bigger delays on CI.
        // Cause is the fact that typing into this textbox causes VS Code to filter
        // the dropdown list. If we don't wait long enough, then an item isn't selected
        // in the dropdown list, meaning the necessary action isn't performed.
        // Works much like an html dropdown: we need to wait for the UI to react to the input
        // before we can hit the enter key.
        // We don't need this delay when selecting files from quick open or selecting
        // commands from quick open, as we wait for those items to get highlighted in the dropdown.
        // Here we're not waiting for something to get highlighted; that's where the problem lies.
        await sleep(waitTimeoutAfterTypging);
    }
    /**
     * Presses a key or key combination (e.g. 'ctrl+shift+p').
     * Modifier keys (Control/Shift) are held down while the remaining keys are pressed,
     * then released; finally waits briefly for the UI to react.
     */
    public async press(keys: string, options?: { delay: number }): Promise<void> {
        debug(`Press key combination ${keys}`);
        const individualKeys = keys.split('+').map(normalizeKey);
        try {
            const pressUpControlKeys: string[] = [];
            for (const key of individualKeys) {
                if (['Control', 'Shift'].includes(key)) {
                    debug(`Down ${key}`);
                    await this.mainPage.keyboard.down(key);
                    pressUpControlKeys.push(key);
                    continue;
                }
                debug(`Press ${key}`);
                await this.mainPage.keyboard.press(key, options);
            }
            // Release modifiers in the order they were pressed.
            while (pressUpControlKeys.length) {
                const key = pressUpControlKeys.shift();
                if (key) {
                    debug(`Up ${key}`);
                    await this.mainPage.keyboard.up(key);
                }
            }
        } finally {
            await sleep(waitTimeoutAfterTypging);
        }
        // Key(s) was pressed, lets wait for UI to react to this.
        await sleep(waitTimeoutAfterTypging);
    }
}

Просмотреть файл

@ -0,0 +1,163 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import { spawnSync } from 'child_process';
import { HookScenarioResult, pickle } from 'cucumber';
import * as fs from 'fs-extra';
import * as path from 'path';
import { uitestsRootPath } from '../constants';
import { sleep } from '../helpers';
import { debug } from '../helpers/logger';
import { IApplication } from '../types';
/**
 * Dismiss messages that are not required.
 * E.g. attempt to dismiss messages such that they never appear.
 *
 * @param {IApplication} app
 */
export async function dismissMessages(app: IApplication) {
    // Known notifications and, where applicable, the caption of the button used to dismiss each.
    // NOTE(review): the exact handling of entries without `buttonText` is defined by
    // `app.notifications.dismiss` — confirm against its implementation.
    const messages = [
        { content: 'Try out Preview of our new Python Language Server', buttonText: 'No thanks' },
        { content: 'Tip: you can change the Python interpreter used by the', buttonText: 'Got it!' },
        { content: 'Help improve VS Code by allowing' },
        { content: 'Linter pylint is not installed', buttonText: 'Do not show again' },
        { content: 'Would you like to run code in the', buttonText: 'No' }
    ];
    await app.notifications.dismiss(messages, 1000);
}
/**
 * When we close VS Code and reopen it, the unsaved files are still left open in VSC.
 * We need to close them (and reset other UI/history state) before shutting down VS Code,
 * so the next test starts from a clean slate.
 *
 * @export
 * @param {IApplication} app
 * @returns
 */
export async function clearWorkspace(app: IApplication) {
    if (!app.isAlive) {
        debug('Not clearing workspace as application is not alive');
        return;
    }
    // Each entry is a command palette command, executed in order via quick open.
    const commands = [
        // Custom command in our bootstrap extension.
        // We can use the command `Debug: Stop` from the command palette only if a debug session is active.
        // Using this approach we can send a command regardless, easy.
        // 'Stop Debugging Python',
        // Assume we have a max of 2 editors, revert changes and close all of them.
        // Hence execute this command twice.
        'View: Revert and Close Editor',
        'View: Revert and Close Editor',
        // 'Terminal: Kill the Active Terminal Instance',
        'Debug: Remove All Breakpoints',
        // Clear this, else when trying to open files, VSC will list files in the file picker dropdown that don't exist.
        // This will cause serious issues.
        // Assume in a test we had a file named `abc.py`.
        // Next test we create a file named `ab.py`. At this point, VSC will remember the file from the previous session and will display `abc.py`.
        // That's a serious problem.
        'File: Clear Recently Opened',
        // Same reason as clearing `Recently Opened`
        'Clear Editor History',
        // Same reason as clearing `Recently Opened`
        // We don't want the command history to be polluted (we don't care about previous sessions).
        'Clear Command History',
        'View: Close All Editors',
        'Notifications: Clear All Notifications',
        'View: Close Panel'
    ];
    for (const command of commands) {
        await app.quickopen.runCommand(command);
    }
    // Wait for UI to get updated (closing editors, closing panel, etc).
    await sleep(200);
}
/**
 * Gets the git repo that needs to be downloaded for the given tags.
 * A tag of the form `@https://github.com/<org>/<repo>[/<sub/dir>]` identifies the repo;
 * any path segments after the repo name are returned as `subDirectory`.
 * URLs ending in `.git` are returned verbatim with no sub-directory.
 *
 * @param {pickle.Tag[]} tags
 * @returns {({ url: string; subDirectory?: string } | undefined)}
 */
export function getGitRepo(tags: pickle.Tag[]): { url: string; subDirectory?: string } | undefined {
    const repoTag = tags.find(tag => tag.name.toLowerCase().startsWith('@https://github.com/'));
    if (!repoTag) {
        return;
    }
    // Strip the leading '@' to get the raw url from the tag name.
    const url = repoTag.name.substring(1);
    if (url.toLowerCase().endsWith('.git')) {
        return { url };
    }
    const [org, repoName, ...subDirParts] = url.substring('https://github.com/'.length).split('/');
    return {
        url: `https://github.com/${org}/${repoName}`,
        subDirectory: subDirParts.length > 0 ? subDirParts.join('/') : undefined
    };
}
/**
 * Gets the path to the folder that contains the source for the test.
 * Identified by a tag of the form `@code:<path relative to the uitests root>`.
 *
 * @param {pickle.Tag[]} tags
 * @returns {(string | undefined)} Absolute path, or undefined when no such tag exists.
 */
export function getSourceFolder(tags: pickle.Tag[]): string | undefined {
    const prefix = '@code:';
    const codeTag = tags.find(tag => tag.name.toLowerCase().startsWith(prefix));
    return codeTag ? path.join(uitestsRootPath, codeTag.name.substring(prefix.length)) : undefined;
}
/**
 * Clones the git repo into the provided directory.
 * Rejects when git cannot be launched or when the clone exits with a non-zero code.
 *
 * @param {{ url: string; subDirectory?: string }} repo
 * @param {string} cwd
 * @returns {Promise<void>}
 */
async function cloneGitRepo({ url }: { url: string }, cwd: string): Promise<void> {
    debug(`Clone git repo ${url}`);
    // spawnSync is synchronous and does not throw: a failure to launch git is
    // reported via `error`, while a failed clone (bad url, network issue, etc.)
    // is reported via a non-zero exit `status`.
    // (The original code ignored `status`, so a failed clone resolved successfully.)
    const proc = spawnSync('git', ['clone', url, '.'], { cwd });
    if (proc.error) {
        throw proc.error;
    }
    if (proc.status !== 0) {
        const stderr = (proc.stderr || '').toString().trim();
        throw new Error(`Failed to clone ${url}. Exit code: ${proc.status}. ${stderr}`);
    }
}
/**
 * Initializes the workspace folder with the required code (copies a local source
 * folder, or clones the git repo identified by the scenario's tags).
 * Returns the new workspace folder when it differs from the one passed in
 * (i.e. when a sub-directory of the cloned repo should be used), else undefined.
 *
 * @export
 * @param {HookScenarioResult} scenario
 * @param {string} workspaceFolder
 * @returns {(Promise<string | undefined>)}
 */
export async function initializeWorkspace(scenario: HookScenarioResult, workspaceFolder: string): Promise<string | undefined> {
    const sourceFolder = getSourceFolder(scenario.pickle.tags);
    if (sourceFolder) {
        debug(`initializeWorkspace for ${sourceFolder}`);
        // Copy files from source folder into workspace folder.
        await fs.copy(sourceFolder, workspaceFolder);
        return;
    }
    const repo = getGitRepo(scenario.pickle.tags);
    if (!repo) {
        debug('initializeWorkspace without a repo');
        return;
    }
    debug(`initializeWorkspace for ${repo.url}`);
    await cloneGitRepo(repo, workspaceFolder);
    // Its possible the source repo is https://github.com/Microsoft/vscode-python/tree/master/build
    // Meaning, we want to clone https://github.com/Microsoft/vscode-python
    // and want the workspace folder to be tree / master / build within the clone.
    if (repo.subDirectory) {
        debug(`initializeWorkspace for ${repo.url} in subdirectory ${repo.subDirectory}`);
        // Normalize separators so the sub-directory path works on all platforms.
        return path.join(workspaceFolder, ...repo.subDirectory.replace(/\\/g, '/').split('/'));
    }
}

Просмотреть файл

@ -0,0 +1,8 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
export * from './bootstrap';
export * from './downloader';
export * from './setup';

262
uitests/src/setup/setup.ts Normal file
Просмотреть файл

@ -0,0 +1,262 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import * as cp from 'child_process';
import { HookScenarioResult } from 'cucumber';
import * as fs from 'fs-extra';
import * as path from 'path';
import * as rimraf from 'rimraf';
import * as tmp from 'tmp';
import { isCI } from '../constants';
import { noop, sleep, unzipFile } from '../helpers';
import { debug, info, initialize as initializeLogger, warn } from '../helpers/logger';
import { Selector } from '../selectors';
import { Channel, IApplication, ITestOptions } from '../types';
import { getExtensionPath as getBootstrapExtensionPath } from './bootstrap';
// tslint:disable: no-console
export class TestOptions implements ITestOptions {
    /**
     * Make static, as we might have a couple of runs of same tests.
     * We will use this to ensure we have a unique name (counter increases per process session, hence no conflicts).
     *
     * @private
     * @static
     * @memberof TestOptions
     */
    private static workspaceCounter = 0;
    // Per-scenario reports directory; undefined until updateForScenario() assigns it.
    private _reportsPath?: string;
    // Workspace folder opened by VS Code; always assigned in the constructor.
    private _workspacePathOrFolder!: string;
    // Directory into which test-only extensions are installed (isolated from any real VS Code install).
    get extensionsPath(): string {
        return path.join(this.testPath, 'extensions');
    }
    // User-data directory used by the VS Code instance under test.
    get userDataPath(): string {
        return path.join(this.testPath, 'user');
    }
    // Location of the user `settings.json` inside the test user-data directory.
    get userSettingsFilePath(): string {
        return path.join(this.userDataPath, 'User', 'settings.json');
    }
    // Screenshots go under the per-scenario reports dir once a scenario is active, else under testPath.
    get screenshotsPath(): string {
        return path.join(this._reportsPath || this.testPath, 'screenshots');
    }
    // Root directory that contains the per-scenario report directories.
    get rootReportsPath(): string {
        return path.join(this.testPath, 'reports');
    }
    // Per-scenario reports dir when set, otherwise the root reports dir.
    get reportsPath(): string {
        return this._reportsPath || this.rootReportsPath;
    }
    // Logs go under the per-scenario reports dir once a scenario is active, else under testPath.
    get logsPath(): string {
        return path.join(this._reportsPath || this.testPath, 'logs');
    }
    get workspacePathOrFolder(): string {
        return this._workspacePathOrFolder;
    }
    /**
     * @param channel VS Code channel the tests run against.
     * @param testPath Root directory for all test artifacts (extensions, user data, reports).
     * @param tempPath Temporary directory, recreated per scenario.
     * @param verbose Whether verbose logging is enabled.
     * @param pythonPath Python executable used by the extension under test.
     */
    constructor(
        public readonly channel: Channel,
        public readonly testPath: string,
        public readonly tempPath: string,
        public readonly verbose: boolean,
        public readonly pythonPath: string = 'python'
    ) {
        this._workspacePathOrFolder = path.join(this.tempPath, 'workspace folder');
    }
    /**
     * Initialize environment for the tests.
     * Recreates the temp/workspace directories and sets the environment variables
     * the extension reads for logging.
     * NOTE(review): name is misspelt ('initilize') but is public surface — do not rename here.
     *
     * @memberof TestOptions
     */
    public async initilize() {
        // NOTE(review): this fallback appears to be dead code — the constructor always assigns
        // _workspacePathOrFolder, so the right-hand side of `||` never runs; confirm before removing.
        this._workspacePathOrFolder = this._workspacePathOrFolder || path.join(this.tempPath, `workspace folder${(TestOptions.workspaceCounter += 1)}`);
        // Wipe any stale state from a previous run (failures are logged, not fatal).
        await Promise.all([
            new Promise(resolve => rimraf(this.tempPath, resolve)).catch(warn.bind(warn, 'Failed to empty temp dir in updateForScenario')),
            new Promise(resolve => rimraf(this._workspacePathOrFolder, resolve)).catch(warn.bind(warn, 'Failed to create workspace directory'))
        ]);
        // Recreate the directory layout needed by the run.
        await Promise.all([
            fs.ensureDir(this.tempPath),
            fs.ensureDir(this._workspacePathOrFolder),
            fs.ensureDir(this.screenshotsPath),
            fs.ensureDir(this.rootReportsPath),
            fs.ensureDir(this.reportsPath)
        ]);
        // Set variables for logging to be enabled within extension.
        process.env.TF_BUILD = 'true';
        // Where are the Python extension logs written to.
        process.env.VSC_PYTHON_LOG_FILE = path.join(this.logsPath, 'pvsc.log');
        // Ensure PTVSD logs are in the reports directory,
        // This way they are available for analyzing.
        process.env.PTVSD_LOG_DIR = this.logsPath;
        // Disable process logging (src/client/common/process/logger.ts).
        // Minimal logging in output channel (cuz we look for specific text in output channel).
        process.env.UITEST_DISABLE_PROCESS_LOGGING = 'true';
        // Disable Insiders in UI Tests for now.
        process.env.UITEST_DISABLE_INSIDERS = 'true';
    }
    /**
     * Update the options for the tests based on the provided scenario.
     * Initialize paths where various logs and screenshots related to a test run will be stored.
     * Path provided must be a relative path. As it will be created in the reports directory.
     *
     * @param {HookScenarioResult} scenario
     * @returns
     * @memberof TestOptions
     */
    public async updateForScenario(scenario: HookScenarioResult) {
        const location = scenario.pickle.locations[0].line;
        // Build a filesystem-safe, unique report-directory name from the scenario name,
        // its feature-file line and the per-process counter.
        this._reportsPath = path.join(this.rootReportsPath, `${scenario.pickle.name}:${location}:_${TestOptions.workspaceCounter}`.replace(/[^a-z0-9\-]/gi, '_'));
        this._workspacePathOrFolder = path.join(this.tempPath, `workspace folder${(TestOptions.workspaceCounter += 1)}`);
        await this.initilize();
    }
    // Point the options at a different workspace folder (used when a scenario provides its own).
    // NOTE(review): name is misspelt ('udpate...') but is public surface — do not rename here.
    public udpateWorkspaceFolder(workspaceFolder: string) {
        this._workspacePathOrFolder = workspaceFolder;
    }
}
/**
 * Get options for the UI Tests.
 * Also wipes stale temp/log/screenshot directories from earlier runs,
 * pre-creates the expected directory layout and initializes the logger.
 *
 * @export
 * @param channel VS Code channel the tests run against.
 * @param testDir Root directory for all test artifacts.
 * @param pythonPath Python executable to use; defaults to 'python'.
 * @param verboseLogging Enable verbose logging.
 * @returns {ITestOptions}
 */
export function getTestOptions(channel: Channel, testDir: string, pythonPath: string = 'python', verboseLogging: boolean = false): ITestOptions {
    // Resolve the real interpreter path only when the caller explicitly passed a
    // falsy value (e.g. ''). With the 'python' default this fallback never runs
    // for omitted arguments — kept for backward compatibility.
    pythonPath =
        pythonPath ||
        cp
            .execSync('python -c "import sys;print(sys.executable)"')
            .toString()
            .trim();
    const options = new TestOptions(channel, testDir, path.join(testDir, 'temp folder'), verboseLogging, pythonPath);
    // Remove left-overs from previous runs; missing directories are fine.
    [options.tempPath, options.userDataPath, options.logsPath, options.screenshotsPath, options.workspacePathOrFolder].forEach(dir => {
        try {
            rimraf.sync(dir);
        } catch {
            // Ignore.
        }
    });
    // Pre-create the directory layout (forEach: the original used .map and discarded the result).
    [
        options.testPath,
        options.extensionsPath,
        options.userDataPath,
        options.screenshotsPath,
        options.reportsPath,
        options.logsPath,
        options.workspacePathOrFolder,
        options.tempPath,
        path.dirname(options.userSettingsFilePath)
    ].forEach(dir => {
        try {
            fs.ensureDirSync(dir);
        } catch {
            // Ignore
        }
    });
    initializeLogger(verboseLogging, path.join(options.logsPath, 'uitests.log'));
    return options;
}
export async function installExtensions(channel: Channel, testDir: string, vsixPath: string): Promise<void> {
const options = getTestOptions(channel, testDir);
await installExtension(options.extensionsPath, 'ms-python.python', vsixPath);
const bootstrapExension = await getBootstrapExtensionPath();
await installExtension(options.extensionsPath, 'ms-python.bootstrap', bootstrapExension);
info('Installed extensions');
}
/**
 * Reset the test user's `settings.json` back to the defaults used by every
 * UI test (a previous scenario may have modified them).
 */
export async function restoreDefaultUserSettings(options: ITestOptions) {
    const extensionSettings = getExtensionSpecificUserSettingsForAllTests();
    await initializeDefaultUserSettings(options, extensionSettings);
}
/**
 * Python-extension specific user settings applied for every UI test.
 */
function getExtensionSpecificUserSettingsForAllTests(): { [key: string]: {} } {
    const settings: { [key: string]: {} } = {};
    // Log everything in LS server, to ensure they are captured in reports.
    // Found under .vscode test/reports/user/logs/xxx/exthostx/output_logging_xxx/x-Python.log
    // These are logs created by VSC.
    // Enabling this makes it difficult to look for text in the panel (there's too much content).
    // "python.analysis.logLevel": "Trace",
    settings['python.venvFolders'] = ['envs', '.pyenv', '.direnv', '.local/share/virtualenvs'];
    // Disable pylint (we don't want this message)
    settings['python.linting.pylintEnabled'] = false;
    return settings;
}
/**
 * Write the default user settings for a test run.
 * Keeps VS Code quiet and deterministic (no telemetry, no auto-updates, no
 * experiments, no auto-closing brackets) and merges in any caller-supplied
 * settings.
 *
 * @param opts Test options (provides pythonPath, channel and the settings file path).
 * @param additionalSettings Extra settings merged over the defaults.
 */
async function initializeDefaultUserSettings(opts: ITestOptions, additionalSettings: { [key: string]: {} } = {}) {
    const defaults: { [key: string]: {} } = {
        'python.pythonPath': opts.pythonPath,
        // We dont need these (avoid VSC from displaying prompts).
        'telemetry.enableTelemetry': false,
        // We don't want extensions getting updated/installed automatically.
        'extensions.autoUpdate': false,
        'telemetry.enableCrashReporter': false,
        // Download latest (upon first load), do not update while tests are running.
        'python.autoUpdateLanguageServer': false,
        // Minimal logging in output channel (cuz we look for specific text in output channel).
        'python.analysis.logLevel': 'Error',
        // Disable experiments, we don't want unexpected behaviors.
        // Experiments result in dynamic (chance) runtime behaviors.
        'python.experiments.enabled': false,
        'debug.showInStatusBar': 'never', // Save some more room in statusbar.
        // We don't want VSC to complete the brackets.
        // When sending text to editors, such as json files, VSC will automatically complete brackets.
        // And that messes up with the text thats being sent to the editor.
        'editor.autoClosingBrackets': 'never',
        'editor.autoClosingOvertype': 'never',
        'editor.autoClosingQuotes': 'never',
        // We need more realestate.
        'editor.minimap.enabled': false,
        // We don't want any surprises.
        'extensions.autoCheckUpdates': false,
        'update.mode': 'none'
    };
    // Caller-supplied settings override the defaults; the channel/CI keys below
    // are applied last and win over everything.
    const settingsToAdd: { [key: string]: {} } = { ...defaults, ...additionalSettings };
    // See logic in here https://github.com/Microsoft/vscode-python/blob/master/src/client/common/insidersBuild/insidersExtensionService.ts
    if (opts.channel === 'insider') {
        // We don't want insiders getting installed (at all).
        // That'll break everything.
        settingsToAdd['python.insidersChannel'] = 'off';
    }
    // Maximize the window and reduce font size only on CI.
    if (isCI) {
        // Start VS Code maximized (good for screenshots and the like).
        // Also more realestate (capturing logs, etc).
        settingsToAdd['window.newWindowDimensions'] = 'maximized';
    }
    await initializeUserSettings(opts, settingsToAdd);
}
export async function waitForPythonExtensionToActivate(timeout: number, app: IApplication) {
debug('Start activating Python Extension');
await app.quickopen.runCommand('Activate Python Extension');
// We know it will take at least 1 second, so lets wait for 1 second, no point trying before then.
await sleep(1000);
const selector = app.getCSSSelector(Selector.PyBootstrapActivatedStatusBar);
await app.driver.waitForSelector(selector, { timeout, visible: true });
debug('Python Extension activation completed');
}
/**
 * Persist the given settings into the test user's `settings.json`,
 * creating the parent directory when necessary.
 */
async function initializeUserSettings(opts: ITestOptions, settings: { [key: string]: {} }) {
    const settingsFile = opts.userSettingsFilePath;
    debug(`initializeUserSettings ${settingsFile} with ${JSON.stringify(settings)}`);
    // The directory may not exist on a fresh run; ignore failures (e.g. already exists).
    await fs.mkdirp(path.dirname(settingsFile)).catch(noop);
    const serialized = JSON.stringify(settings, undefined, 4);
    return fs.writeFile(settingsFile, serialized, 'utf8');
}
/**
 * Install an extension from a VSIX into the given extensions directory by
 * unzipping the archive into a staging folder and copying the payload across.
 *
 * @param extensionsDir Target VS Code extensions directory.
 * @param extensionName Directory name for the installed extension (publisher.name).
 * @param vsixPath Path to the `.vsix` archive.
 */
async function installExtension(extensionsDir: string, extensionName: string, vsixPath: string) {
    // Remove any previous install of this extension (best effort).
    await new Promise(resolve => rimraf(path.join(extensionsDir, extensionName), resolve)).catch(noop);
    // tmp.dir is callback-based; wrap it into a promise.
    const stagingDir = await new Promise<string>((resolve, reject) => {
        tmp.dir((err: Error, dir: string) => (err ? reject(err) : resolve(dir)));
    });
    await unzipFile(vsixPath, stagingDir);
    // The actual extension payload lives in the `extension` folder inside the VSIX.
    await fs.copy(path.join(stagingDir, 'extension'), path.join(extensionsDir, extensionName));
    // Clean up the staging folder (best effort).
    await new Promise(resolve => rimraf(stagingDir, resolve)).catch(noop);
}

Просмотреть файл

@ -0,0 +1,16 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-invalid-this
import { Then, When } from 'cucumber';
// Run a command from the command palette.
// NOTE: `function` (not an arrow) is required so cucumber binds `this` to the World.
When('I select the command {string}', async function(command: string) {
    await this.app.quickopen.runCommand(command);
});
// Same behavior as above; worded for use in `Then` clauses of feature files.
Then('select the command {string}', async function(command: string) {
    await this.app.quickopen.runCommand(command);
});

92
uitests/src/steps/core.ts Normal file
Просмотреть файл

@ -0,0 +1,92 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-invalid-this
import * as colors from 'colors';
import { Given, Then, When } from 'cucumber';
import { extensionActivationTimeout } from '../constants';
import { noop, sleep } from '../helpers';
import { waitForPythonExtensionToActivate } from '../setup';
// No-op step; useful as a placeholder in feature files.
Then('do nothing', noop);
// Deliberately failing step; useful to verify failure reporting/screenshots.
Then('kaboom', () => {
    throw new Error('Kaboom');
});
// Marker step for work-in-progress scenarios; does nothing.
Then('wip', noop);
// Generic named step that does nothing (the step text is only documentation).
Then('Step {string}', async (_step: string) => {
    noop();
});
// NOTE: steps below use `function` (not arrows) so cucumber binds `this` to the World.
// Restart VS Code emulating a first-time load (start(true)).
Given('VS Code is opened for the first time', async function() {
    await this.app.exit();
    await this.app.start(true);
});
When('I open VS Code for the first time', async function() {
    await this.app.exit();
    await this.app.start(true);
});
Given('VS Code is closed', function() {
    return this.app.exit();
});
When('I close VS Code', function() {
    return this.app.exit();
});
When('I start VS Code', function() {
    return this.app.start();
});
When('I reload VS Code', function() {
    return this.app.reload();
});
When('I wait for a maximum of {int} seconds for the Python extension to get activated', async function(seconds: number) {
    await waitForPythonExtensionToActivate(seconds * 1000, this.app);
});
When('I wait for the Python extension to activate', async function() {
    await waitForPythonExtensionToActivate(extensionActivationTimeout, this.app);
});
When('the Python extension has activated', async function() {
    await waitForPythonExtensionToActivate(extensionActivationTimeout, this.app);
});
Given('the Python extension has been activated', async function() {
    await waitForPythonExtensionToActivate(extensionActivationTimeout, this.app);
});
// Plain delays; the second(s) variants convert to milliseconds for sleep().
When('I wait for {int} second(s)', async (seconds: number) => sleep(seconds * 1000));
Then('wait for {int} millisecond(s)', sleep);
When('I wait for {int} millisecond(s)', sleep);
Then('wait for {int} second(s)', (seconds: number) => sleep(seconds * 1000));
// Capture a screenshot with a unique, timestamp-based name.
Then('take a screenshot', async function() {
    // await sleep(500);
    await this.app.captureScreenshot(`take_a_screenshot_${new Date().getTime().toString()}`);
});
// tslint:disable-next-line: no-console
Then('log the message {string}', (message: string) => console.info(colors.green(message)));
// Press a single key (or chord) in the VS Code window.
// NOTE: `function` (not an arrow) is required so cucumber binds `this` to the World.
When(/^I press (.*)$/, async function(key: string) {
    await this.app.driver.press(key);
});
// Press the key exactly `counter` times.
// Fix: the original loop condition was `i <= counter`, which pressed the key one extra time.
When('I press {word} {int} times', async function(key: string, counter: number) {
    for (let i = 0; i < counter; i += 1) {
        await this.app.driver.press(key);
    }
});

Просмотреть файл

@ -0,0 +1,113 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-invalid-this
import { Then, When } from 'cucumber';
import { CucumberRetryMax5Seconds } from '../constants';
// NOTE: steps use `function` (not arrows) so cucumber binds `this` to the World.
Then('the Python Debug Configuration picker is displayed', async function() {
    await this.app.debugger.waitForConfigPicker();
});
When('I select the debug configuration {string}', async function(configItem: string) {
    await this.app.debugger.selectConfiguration(configItem);
});
Then('the debugger starts', async function() {
    await this.app.debugger.waitUntilStarted();
});
Then('the debugger pauses', async function() {
    await this.app.debugger.waitUntilPaused();
});
Then('the debugger stops', async function() {
    await this.app.debugger.waitUntilStopped();
});
// Same as above with a caller-supplied timeout (converted to milliseconds).
Then('the debugger will stop within {int} seconds', async function(timeoutSeconds: number) {
    await this.app.debugger.waitUntilStopped(timeoutSeconds * 1000);
});
// Retries for up to 5 seconds — the editor may not reflect the paused position immediately.
Then('the current stack frame is at line {int} in {string}', CucumberRetryMax5Seconds, async function(line: number, fileName: string) {
    await this.app.documents.waitForActiveEditor(fileName);
    await this.app.documents.waitForPosition({ line });
});
// Adds a breakpoint on the given line of the currently active editor.
When('I add a breakpoint to line {int}', async function(line: number) {
    await this.app.debugger.setBreakpointOnLine(line);
});
// Opens the named file first, then adds a breakpoint on the given line.
When('I add a breakpoint to line {int} in {string}', async function(line: number, fileName: string) {
    await this.app.quickopen.openFile(fileName);
    await this.app.debugger.setBreakpointOnLine(line);
});
// Given('the debug sidebar is open', async function() {
// await this.app.debugger.openDebugViewlet();
// });
// When('I configure the debugger', async function() {
// await this.app.debugger.configure();
// });
// When('stopOnEntry is true in launch.json', async function() {
// await updateDebugConfiguration('stopOnEntry', true, context.app.workspacePathOrFolder, 0);
// });
// When('stopOnEntry is false in launch.json', async function() {
// await updateDebugConfiguration('stopOnEntry', false, context.app.workspacePathOrFolder, 0);
// });
// Then('debugger starts', async function() {
// await sleep(200);
// await this.app.debugger.debuggerHasStarted();
// });
// When('I open the debug console', async function() {
// // await this.app.debugger.openDebugConsole();
// await context.app.workbench.quickopen.runCommand('View: Debug Console');
// });
// Then('number of variables in variable window is {int}', async function(count: number) {
// await this.app.debugger.waitForVariableCount(count, count);
// });
// When('I step over', async function() {
// // await this.app.debugger.stepOver();
// await context.app.workbench.quickopen.runCommand('Debug: Step Over');
// });
// When('I step in', async function() {
// // await this.app.debugger.stepIn();
// await context.app.workbench.quickopen.runCommand('Debug: Step Into');
// });
// When('I continue', async function() {
// // await this.app.debugger.continue();
// await context.app.workbench.quickopen.runCommand('Debug: Continue');
// });
// Then('stack frame for file {string} is displayed', async function(file: string) {
// await this.app.debugger.waitForStackFrame(
// sf => sf.name.indexOf(file) >= 0,
// 'looking for main.py'
// );
// });
// Then('debugger stops', async function() {
// await this.app.debugger.debuggerHasStopped();
// });
// Then('stack frame for file {string} and line {int} is displayed', async function(file: string, line: number) {
// await this.app.debugger.waitForStackFrame(
// sf => sf.name.indexOf(file) >= 0 && sf.lineNumber === line,
// 'looking for main.py'
// );
// });
// Then('the text {string} is displayed in the debug console', async function(text: string) {
// await this.app.debugger.waitForOutput(output => {
// return output.some(line => line.indexOf(text) >= 0);
// });
// });

Просмотреть файл

@ -0,0 +1,226 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-invalid-this
import * as assert from 'assert';
import { expect } from 'chai';
import { Given, Then, When } from 'cucumber';
import * as fs from 'fs-extra';
import * as path from 'path';
import { CucumberRetryMax10Seconds, CucumberRetryMax5Seconds } from '../constants';
import { noop, retryWrapper, sleep } from '../helpers';
import { IApplication } from '../types';
// tslint:disable-next-line: no-var-requires no-require-imports
const clipboardy = require('clipboardy');
// const autoCompletionListItemSlector = '.editor-widget.suggest-widget.visible .monaco-list-row a.label-name .monaco-highlighted-label';
// NOTE: steps use `function` (not arrows) so cucumber binds `this` to the World.
When('I create a new file', async function() {
    await this.app.documents.createNewUntitledFile();
});
// Create a file in the editor by opening an editor and pasting the code.
// Sending text to the editor is the same as manually typing code.
// This can cause issues, e.g. vsc will auto complete brackets, etc...
// Easiest option, paste the text into the editor.
When('I create a new file with the following content', async function(contents: string) {
    await this.app.documents.createNewUntitledFile();
    await clipboardy.write(contents);
    await this.app.quickopen.runCommand('Paste');
    // Wait for text to get pasted and UI to get updated.
    await sleep(200);
});
// Write the file directly to disk, then open & close it once so VS Code indexes it.
Given('a file named {string} is created with the following content', async function(filename: string, contents: string) {
    const fullpath = path.join(this.app.workspacePathOrFolder, filename);
    await fs.ensureDir(path.dirname(fullpath));
    await fs.writeFile(fullpath, contents);
    // Ensure VS Code has had time to refresh the explorer and is aware of the file.
    // Else if we later attempt to open this file, VSC might not be aware of it and wouldn't display anything in the `quick open` dropdown.
    const openRecentlyCreatedDocument = async () => {
        await this.app.documents.refreshExplorer();
        await this.app.quickopen.openFile(path.basename(filename));
        await this.app.quickopen.runCommand('View: Close Editor');
    };
    await retryWrapper({ timeout: 5000 }, openRecentlyCreatedDocument);
});
// Change the language mode of the active editor via the command palette.
When('I change the language of the file to {string}', async function(language: string) {
    await this.app.quickopen.runCommand('Change Language Mode');
    await this.app.quickinput.select({ value: language });
});
When('I go to line {int}', async function(line: number) {
    await this.app.documents.gotToPosition({ line });
});
When('I go to line {int}, column {int}', async function(line: number, column: number) {
    await this.app.documents.gotToPosition({ line, column });
});
Given('the file {string} is open', async function(file: string) {
    await this.app.quickopen.openFile(file);
});
When('I open the file {string}', async function(file: string) {
    await this.app.quickopen.openFile(file);
});
// Wait for some time, possibly UI hasn't been updated.
// Its been observed that 2 seconds isn't enough on Mac for Jedi/LS (go to definition).
Then('the cursor is on line {int}', CucumberRetryMax10Seconds, async function(lineNumber: number) {
    const { line } = await this.app.documents.getCurrentPosition();
    assert.equal(line, lineNumber, `Line number ${line} is not same as expected ${lineNumber}`);
});
// Wait for some time, possibly UI hasn't been updated.
// Its been observed that 2 seconds isn't enough on Mac for Jedi/LS (go to definition).
Then('auto completion list contains the item {string}', CucumberRetryMax5Seconds, async function(label: string) {
    // tslint:disable-next-line: no-console
    const labels = await this.app.documents.getAutoCompletionList();
    expect(labels).to.contain(label, `Label '${label}' not found in [${labels.join(',')}]`);
});
Then('the file {string} will be opened', async function(file: string) {
    await this.app.documents.waitUntilFileOpened(file);
});
Then('the file {string} is opened', async function(file: string) {
    await this.app.documents.waitUntilFileOpened(file);
});
// Then('a file named {string} is created with the following content', async (fileName: string, contents: string) => {
//     const fullFilePath = path.join(context.app.workspacePathOrFolder, fileName);
//     await fs.mkdirp(path.dirname(fullFilePath)).catch(noop);
//     await fs.writeFile(fullFilePath, contents);
//     await sleep(1000);
// });
// When('the file {string} has the following content', async (fileName: string, contents: string) => {
//     const fullFilePath = path.join(context.app.workspacePathOrFolder, fileName);
//     await fs.mkdirp(path.dirname(fullFilePath)).catch(noop);
//     await fs.writeFile(fullFilePath, contents);
//     await sleep(1000);
// });
// Delete the file from disk if present (best effort).
Given('a file named {string} does not exist', async function(fileName: string) {
    const fullFilePath = path.join(this.app.workspacePathOrFolder, fileName);
    await fs.unlink(fullFilePath).catch(noop);
});
// Same as above, with an extra pause so VS Code notices the deletion.
Given('the file {string} does not exist', async function(fileName: string) {
    const fullFilePath = path.join(this.app.workspacePathOrFolder, fileName);
    await fs.unlink(fullFilePath).catch(noop);
    await sleep(1000);
});
// Then('a file named {string} exists', async (fileName: string) => {
//     const fullFilePath = path.join(context.app.workspacePathOrFolder, fileName);
//     const exists = await fs.pathExists(fullFilePath);
//     expect(exists).to.equal(true, `File '${fullFilePath}' should exist`);
// });
/**
 * Assert that a file exists inside the workspace, retrying until `timeout`
 * milliseconds have elapsed (the file may be produced asynchronously).
 *
 * @param app Application under test (provides the workspace path).
 * @param fileName File path relative to the workspace folder.
 * @param timeout Maximum milliseconds to keep retrying (default 1000).
 */
async function expectFile(app: IApplication, fileName: string, timeout = 1000) {
    const fullFilePath = path.join(app.workspacePathOrFolder, fileName);
    await retryWrapper({ timeout }, async () => {
        const exists = await fs.pathExists(fullFilePath);
        assert.ok(exists, `File '${fullFilePath}' should exist`);
    });
}
// NOTE: steps use `function` (not arrows) so cucumber binds `this` to the World.
Then('a file named {string} will be created', async function(fileName: string) {
    await expectFile(this.app, fileName);
});
Then('a file named {string} is created', async function(fileName: string) {
    await expectFile(this.app, fileName);
});
// Same check, with a caller-supplied timeout (converted to milliseconds).
Then('a file named {string} is created within {int} seconds', async function(fileName: string, seconds: number) {
    await expectFile(this.app, fileName, seconds * 1000);
});
// When(/^I press (.*)$/, async (key: string) => {
// await context.app.code.dispatchKeybinding(key);
// });
// When('I press {word} {int} times', async (key: string, counter: number) => {
// for (let i = 0; i <= counter; i += 1) {
// await context.app.code.dispatchKeybinding(key);
// }
// });
// Then('code lens {string} is visible in {int} seconds', async (title: string, timeout: number) => {
// const retryInterval = 200;
// const retryCount = timeout * 1000 / 200;
// const eles = await context.app.code.waitForElements('div[id="workbench.editors.files.textFileEditor"] span.codelens-decoration a', true, undefined, retryCount, retryInterval);
// const expectedLenses = eles.filter(item => item.textContent.trim().indexOf(title) === 0);
// expect(expectedLenses).to.be.lengthOf.greaterThan(0);
// });
// Then('code lens {string} is visible', async (title: string) => {
// const eles = await context.app.code.waitForElements('div[id="workbench.editors.files.textFileEditor"] span.codelens-decoration a', true);
// const expectedLenses = eles.filter(item => item.textContent.trim().indexOf(title) === 0);
// expect(expectedLenses).to.be.lengthOf.greaterThan(0);
// });
// Given('the file {string} does not exist', async (file: string) => {
// const filePath = path.join(context.app.workspacePathOrFolder, file);
// if (await fs.pathExists(filePath)) {
// await fs.unlink(filePath);
// }
// });
// When('I open the file {string}', async (file: string) => {
// await context.app.workbench.quickopen.openFile(file);
// });
// Given('the file is scrolled to the top', async () => {
// await context.app.workbench.quickopen.runCommand('Go to Line...');
// await context.app.workbench.quickopen.waitForQuickOpenOpened(10);
// await context.app.code.dispatchKeybinding('1');
// await context.app.code.dispatchKeybinding('Enter');
// await sleep(100);
// });
// Given('the file {string} is updated with the value {string}', async (file: string, value: string) => {
// await fs.writeFile(path.join(context.app.workspacePathOrFolder, file), value);
// });
// When('I update file {string} with value {string}', async (file: string, value: string) => {
// await fs.writeFile(path.join(context.app.workspacePathOrFolder, file), value);
// });
// When('I select the text {string} in line {int} of file {string}', async (selection: string, line: number, file: string) => {
// await context.app.workbench.editor.clickOnTerm(file, selection, line);
// });
// When('I set cursor to line {int} of file {string}', async (line: number, file: string) => {
// await context.app.workbench.editor.waitForEditorFocus(file, line);
// });
// When('I press {string}', async (keyStroke: string) => {
// await context.app.code.dispatchKeybinding(keyStroke);
// });
// Then('line {int} of file {string} will be highlighted', async (line: number, file: string) => {
// await context.app.workbench.editor.waitForHighlightingLine(file, line);
// });
// Then('text {string} will appear in the file {string}', async (text: number, file: string) => {
// await context.app.workbench.editor.waitForEditorContents(file, contents => contents.indexOf(`${text}`) > -1);
// });
// When('I type the text {string} into the file {string}', async (text: string, file: string) => {
// await context.app.workbench.editor.waitForTypeInEditor(file, text);
// });
// When('I go to definition for {string} in line {int} of file {string}', async (selection: string, line: number, file: string) => {
// await context.app.workbench.quickopen.openFile(file);
// await context.app.workbench.editor.clickOnTerm(file, selection, line);
// await context.app.code.dispatchKeybinding('right');
// await context.app.code.dispatchKeybinding('F12');
// });

Просмотреть файл

@ -0,0 +1,112 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-invalid-this
import { Given, When } from 'cucumber';
import { ensurePackageIsInstalled, ensurePackageIsNotInstalled } from '../helpers/python';
// NOTE: steps use `function` (not arrows) so cucumber binds `this` to the World.
// Pick an interpreter from the interpreter list whose name contains the given text.
When('I select the Python Interpreter containing the text {string}', async function(text: string) {
    await this.app.interpreters.select({ name: text });
});
// When('I select the default mac Interpreter', async () => {
//     await context.app.workbench.interpreters.selectInterpreter({ tooltip: '/usr/bin/python' });
// });
Given('the package {string} is not installed', async function(moduleName: string) {
    await ensurePackageIsNotInstalled(this.options.pythonPath, moduleName);
});
When('I install the package {string}', async function(moduleName: string) {
    await ensurePackageIsInstalled(this.options.pythonPath, moduleName);
});
When('I uninstall the package {string}', async function(moduleName: string) {
    // Fix: this previously called ensurePackageIsInstalled, which INSTALLED the
    // package instead of removing it.
    await ensurePackageIsNotInstalled(this.options.pythonPath, moduleName);
});
Given('the package {string} is installed', async function(moduleName: string) {
    await ensurePackageIsInstalled(this.options.pythonPath, moduleName);
});
// Given('there are no pipenv environments', async () => {
// await deletePipEnv(context.app);
// });
// Given('there are no virtual environments in the workspace', async () => {
// await deleteVenvs(context.app);
// });
// Given('there are no virtual environments in the workspace', async () => {
// await deleteVenvs(context.app);
// });
// Given('some random interpreter is selected', async () => {
// await selectGenericInterpreter(context.app);
// });
// When('I select some random interpreter', async () => {
// await selectGenericInterpreter(context.app);
// });
// When('I create a pipenv environment', async () => {
// await createPipEnv(context.app.activeEnvironment as PipEnvEnviroment, context.app);
// });
// When('I create a venv environment with the name {string}', async (venvName: string) => {
// const venvEnv = context.app.activeEnvironment as VenvEnviroment;
// venvEnv.venvArgs = [venvName];
// await createVenv(venvEnv, context.app);
// });
// When('I change the python path in settings.json to {string}', async (pythonPath: string) => {
// await updateSetting('python.pythonPath', pythonPath, context.app.workspacePathOrFolder);
// });
// When('I select a python interpreter', async () => {
// await updateSetting('python.pythonPath', context.app.activeEnvironment.pythonPath!, context.app.workspacePathOrFolder);
// await sleep(1000);
// });
// Given('there is no python path in settings.json', async () => {
// await removeSetting('python.pythonPath', context.app.workspacePathOrFolder);
// });
// Then('settings.json will automatically be updated with pythonPath', { timeout: 60000 }, async () => {
// const currentPythonPath = await getSetting<string | undefined>('python.pythonPath', context.app.workspacePathOrFolder);
// assert.notEqual(currentPythonPath, undefined);
// await interpreterInStatusBarDisplaysCorrectPath(currentPythonPath!, context.app);
// });
// Then('the selected interpreter contains the name {string}', async (name: string) => {
// const pythonPathInSettings = await getSetting<string>('python.pythonPath', context.app.workspacePathOrFolder);
// const tooltip = getDisplayPath(pythonPathInSettings, context.app.workspacePathOrFolder);
// const text = await context.app.workbench.statusbar.waitForStatusbarLinkText(tooltip);
// assert.notEqual(text.indexOf(name), -1, `'${name}' not found in display name`);
// });
// Then('a message containing the text {string} will be displayed', async (message: string) => {
// await context.app.workbench.quickinput.waitForMessage(message);
// try {
// await sleep(100);
// await context.app.code.waitAndClick('.action-label.icon.clear-notification-action');
// await sleep(100);
// await context.app.code.waitAndClick('.action-label.icon.clear-notification-action');
// await sleep(100);
// } catch {
// // Do nothing.
// }
// });
// Then('interpreter informantion in status bar has refreshed', async () => {
// const tooltip = getDisplayPath(context.app.activeEnvironment.pythonPath!, context.app.workspacePathOrFolder);
// const text = await context.app.workbench.statusbar.waitForStatusbarLinkText(tooltip);
// context.app.activeEnvironment.displayNameParts.forEach(item => {
// // In the case of pipenv environments, the spaces are replaced with '_'.
// const parsed = item.replace('/ /g', '_');
// const found = text.indexOf(item) >= 0 || text.indexOf(parsed) >= 0;
// assert.equal(found, true, `'${item}' not found in display name`);
// });
// });

185
uitests/src/steps/main.ts Normal file
Просмотреть файл

@ -0,0 +1,185 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import { After, Before, HookScenarioResult, setDefaultTimeout, setDefinitionFunctionWrapper, setWorldConstructor, Status } from 'cucumber';
import * as fs from 'fs-extra';
import * as path from 'path';
import * as rimraf from 'rimraf';
import { extensionActivationTimeout, maxStepTimeout } from '../constants';
import { noop, RetryOptions, retryWrapper } from '../helpers';
import { debug, error, warn } from '../helpers/logger';
import { getTestOptions, restoreDefaultUserSettings, TestOptions, waitForPythonExtensionToActivate } from '../setup';
import { clearWorkspace, dismissMessages, initializeWorkspace } from '../setup/environment';
import { IApplication, ITestOptions } from '../types';
import { Application } from '../vscode';
import { WorldParameters } from './types';
// tslint:disable: no-invalid-this mocha-no-side-effect-code no-any non-literal-require no-function-expression
// keeps track of the fact that we have dismissed onetime messages displayed in VSC.
// E.g. messages such as 'Tip: You can select an interpreter from statusbar'.
// Such messages will keep showing up until they are dismissed by user - never to be displayed again.
// Dismissing such messages makes it easy for testing (less noise when testing messages and less noise in screenshots).
// This will get reset when user loads VSC for first time.
let oneTimeMessagesDismissed = false;
/**
 * Context object available in every step.
 * Step = BDD Step such as `Given`, `When` and `Then`.
 *
 * @class MyWorld
 */
class MyWorld {
    // The VS Code application under test.
    public readonly app: IApplication;
    // Options for the current test run (paths, channel, python path, ...).
    public readonly options: ITestOptions;
    constructor({ parameters, attach }: { attach: Function; parameters: WorldParameters }) {
        debug('Start MyWorld contructor');
        const testOptions = getTestOptions(parameters.channel, parameters.testDir, parameters.pythonPath, parameters.verboseLogging);
        this.app = new Application(testOptions);
        // An emulated first-time start must re-show (and later re-dismiss) one-time messages.
        this.app.on('start', emulateFirstTimeLoad => (emulateFirstTimeLoad ? (oneTimeMessagesDismissed = false) : undefined));
        // Attach captured screenshots to the cucumber report.
        // NOTE(review): event name 'screenshotCatured' is misspelt — it must match the
        // emitter in Application; verify both sides before renaming.
        this.app.on('screenshotCatured', data => attach(data, 'image/png'));
        this.options = testOptions;
        debug('End MyWorld contructor');
    }
}
declare module 'cucumber' {
    /**
     * Context object available in every step.
     * Step = BDD Step such as `Given`, `When` and `Then`.
     * (Module augmentation so `this.app` and `this.options` are typed inside step definitions.)
     *
     * @export
     * @interface World
     */
    // tslint:disable-next-line: interface-name
    export interface World {
        app: IApplication;
        options: ITestOptions;
    }
}
// Use MyWorld as the `this` context for all steps.
setWorldConstructor(MyWorld);
// We might have steps that are slow, hence allow max timeouts of 2 minutes.
// Also easy for debugging.
setDefaultTimeout(maxStepTimeout);
// const lastSetWorkspaceFolder = '';
// Runs before every scenario: prepares the workspace, restores default user settings,
// starts VS Code, waits for the Python extension to activate, dismisses one-time
// messages, then reloads so each test starts with the extension NOT yet activated.
Before(async function(scenario: HookScenarioResult) {
    debug('Start Before');
    const options = (this.app as Application).options as TestOptions;
    // Re-target scenario-specific paths (logs, screenshots, reports, ...).
    await options.updateForScenario(scenario);
    // Initialize the workspace with the required code.
    // I.e. if required download the source that's meant to be used for testing (from a git repo).
    // Optionally if we're to use a new sub directory in the repo, then update the workspace folder accordingly.
    const newWorkspaceFolder = await initializeWorkspace(scenario, this.app.workspacePathOrFolder);
    if (newWorkspaceFolder) {
        // NOTE(review): 'udpate' [sic] — method name is declared on TestOptions elsewhere; rename there first.
        options.udpateWorkspaceFolder(newWorkspaceFolder);
    }
    // These must never change (we control the test environment, hence we need to ensure `settings.json` is as expected).
    // For every test this will be reset (possibly a previous test updated the user settings).
    await restoreDefaultUserSettings(options);
    await this.app.start();
    // Activating the extension can be slow on windows.
    await waitForPythonExtensionToActivate(extensionActivationTimeout, this.app);
    debug('Waiting for VSC & Python Extension to display its messages, so they can be dimissed');
    // Rather than waiting & then dismissing messages, just keep retrying to dismiss messages.
    // `dismiss` always rejects so retryWrapper keeps re-invoking it until the timeout expires.
    const dismiss = async () => dismissMessages(this.app).then(() => Promise.reject());
    // NOTE(review): 5s when messages were ALREADY dismissed, 1s on first load — this looks
    // inverted relative to the "for 5 seconds" comment above; confirm intent.
    const timeout = oneTimeMessagesDismissed ? 5000 : 1000;
    await retryWrapper({ timeout, logFailures: false }, dismiss).catch(noop);
    // eslint-disable-next-line require-atomic-updates
    oneTimeMessagesDismissed = true;
    // Since we activated the python extension, lets close and reload.
    // For the tests the extension should not be activated (the tests will do that).
    await this.app.reload();
});
// Runs after every scenario: captures a screenshot, clears the workspace, exits VS Code,
// then either deletes all transient artifacts (on pass) or preserves them for triage (on failure).
After(async function(scenario: HookScenarioResult) {
    // Close all editors, etc.
    // Finally reset user history.
    // performance.mark(`${scenario.pickle.name}-after-start`);
    try {
        if (this.app.isAlive) {
            // Capture a screenshot after every scenario.
            // Whether it fails or not (very useful when trying to figure out whether a test actually ran, why it failed, etc).
            const name = `After_${new Date().getTime()}`;
            // If VS Code has died, then ignore the errors (capturing screenshots will fail).
            await this.app.captureScreenshot(name).catch(warn.bind(warn, 'Failed to capture after hook screenshot.'));
            await dismissMessages(this.app);
            // If VS Code has died, then ignore the errors (clearing workspace using `commands` from the `command palette` will fail).
            await clearWorkspace(this.app).catch(warn.bind(warn, 'Failed to clear the workspace.'));
        }
    } catch (ex) {
        // Handle exception as cucumber doesn't handle (log) errors in hooks too well.
        // Basically they aren't logged, i.e. get swallowed up.
        error('After hook failed', ex);
        throw ex;
    } finally {
        // Always attempt to exit VS Code, even if the steps above failed.
        await this.app.exit().catch(warn.bind(warn, 'Failed to exit in After hook'));
        const options = (this.app as Application).options;
        if (scenario.result.status === Status.PASSED) {
            // If the tests have passed, then delete everything related to previous tests.
            // Delete screenshots, logs, everything that's transient.
            await Promise.all([
                new Promise(resolve => rimraf(options.logsPath, resolve)).catch(noop),
                new Promise(resolve => rimraf(options.reportsPath, resolve)).catch(noop),
                new Promise(resolve => rimraf(options.screenshotsPath, resolve)).catch(noop),
                new Promise(resolve => rimraf(options.tempPath, resolve)).catch(noop),
                new Promise(resolve => rimraf(options.workspacePathOrFolder, resolve)).catch(noop)
            ]);
        } else {
            // Ok, test failed, copy everything we'll need to triage this issue.
            // State of the workspace folder, logs, screenshots, everything.
            // Remember, screenshots are specific to each test (hence they are preserved as long as we don't delete them).
            await fs.copy(options.workspacePathOrFolder, path.join(options.reportsPath, 'workspace folder'));
            await fs.copyFile(options.userSettingsFilePath, path.join(options.reportsPath, 'user_settings.json'));
            await Promise.all([
                new Promise(resolve => rimraf(options.workspacePathOrFolder, resolve)).catch(noop),
                new Promise(resolve => rimraf(options.tempPath, resolve)).catch(noop)
            ]);
        }
    }
});
/*
* Create a wrapper for all steps to re-try if the step is configured for retry.
* (its possible the UI isn't ready, hence we need to re-try some steps).
*
* Cast to any as type definitions setDefinitionFunctionWrapper is wrong.
*/
type AsyncFunction = (...args: any[]) => Promise<any>;
// Wrap every step definition: steps configured with retry options are re-invoked via
// retryWrapper (the UI may simply not be ready yet); all other steps run unchanged.
(setDefinitionFunctionWrapper as any)(function(fn: Function, opts?: { retry?: RetryOptions }) {
    return async function(this: {}, ...stepArgs: any[]) {
        if (opts && opts.retry) {
            return retryWrapper.bind(this)(opts.retry, fn as AsyncFunction, ...stepArgs);
        }
        return fn.apply(this, stepArgs);
    };
});
// /*
// Capture screenshots after every step.
// */
// (setDefinitionFunctionWrapper as any)(function (fn: Function) {
// async function captureScreenshot() {
// try {
// const name = `After_${new Date().getTime()}`.replace(/[^a-z0-9\-]/gi, '_');
// // Ignore errors, as its possible app hasn't started.
// await context.app.captureScreenshot(name).catch(noop);
// } catch { noop(); }
// }
// return async function (this: {}) {
// const result = fn.apply(this, [].slice.call(arguments));
// if (result.then) {
// return (result as Promise<any>).finally(captureScreenshot);
// } else {
// await captureScreenshot();
// return result;
// }
// };
// });

Просмотреть файл

@ -0,0 +1,85 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-invalid-this
import * as assert from 'assert';
import { expect } from 'chai';
import { Then } from 'cucumber';
import { CucumberRetryMax20Seconds } from '../constants';
import { retryWrapper, sleep } from '../helpers';
import { debug } from '../helpers/logger';
import { IApplication } from '../types';
/**
 * Wait (retrying for up to `timeout` ms) until a notification whose text contains
 * `message` (case-insensitive substring match) is displayed.
 * Throws (via the retry wrapper) if no such notification appears in time.
 */
async function notificationDisplayed(app: IApplication, message: string, timeout: number = 10_000) {
    const needle = message.toLowerCase();
    async function checkMessages() {
        const hasMessages = await app.notifications.hasMessages();
        debug(`Has Messages ${hasMessages}`);
        expect(hasMessages).to.be.equal(true, 'No messages displayed');
        const messages = await app.notifications.getMessages();
        const found = messages.some(item => item.toLowerCase().indexOf(needle) >= 0);
        if (!found) {
            assert.fail(`Message '${message}' not found in [${messages.join(',')}]`);
        }
    }
    await retryWrapper({ timeout }, checkMessages);
}
Then('no notifications are displayed', async function() {
    const hasMessages = await this.app.notifications.hasMessages();
    assert.ok(!hasMessages);
});
// Only considers notifications of severity 'error'.
Then('no error notifications are displayed', async function() {
    const hasMessages = await this.app.notifications.hasMessages('error');
    assert.ok(!hasMessages);
});
// NOTE(review): despite saying 'with the text', this performs a case-insensitive
// substring match (same implementation as the 'containing the text' step below).
Then('a message with the text {string} is displayed', async function(message: string) {
    await notificationDisplayed(this.app, message);
});
Then('a message containing the text {string} is displayed', async function(message: string) {
    await notificationDisplayed(this.app, message);
});
Then('a message containing the text {string} will be displayed within {int} seconds', async function(message: string, timeoutSeconds: number) {
    // Convert seconds (from the step text) to the milliseconds notificationDisplayed expects.
    await notificationDisplayed(this.app, message, timeoutSeconds * 1000);
});
/**
* Checks whether a message is not displayed.
* If it is, then an assertion error is thrown.
*
* @param {string} message
* @returns
*/
async function messageIsNotDisplayed(app: IApplication, message: string) {
    // Wait for a max of 5 seconds for messages to appear.
    // If nothing shows up within this period, then assume everything is ok.
    await sleep(5000);
    if (!(await app.notifications.hasMessages())) {
        return;
    }
    const needle = message.toLowerCase();
    const messages = await app.notifications.getMessages();
    // Case-insensitive substring match — fail if the message IS displayed.
    if (messages.some(item => item.toLowerCase().indexOf(needle) >= 0)) {
        assert.fail(`Message '${message}' found in [${messages.join(',')}]`);
    }
}
Then('a message containing the text {string} is not displayed', async function(message: string) {
    await messageIsNotDisplayed(this.app, message);
});
Then('I click the {string} button for the message with the text {string}', CucumberRetryMax20Seconds, async function(button: string, message: string) {
    // Ensure the message is actually displayed before trying to click its button.
    await notificationDisplayed(this.app, message);
    await this.app.notifications.dismiss([{ buttonText: button, content: message }], 2);
    // We might have to retry closing the message as its possible a new message was displayed in the mean time.
    // In which case closing the message won't work.
    // Imagine you as a user are about to close a message, then a new message appears! It doesn't work!
    await messageIsNotDisplayed(this.app, message);
    // Wait for state to get updated (e.g. if we're dismissing one time messages, then this state needs to be persisted).
    await sleep(500);
});

Просмотреть файл

@ -0,0 +1,18 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-invalid-this
import { Then } from 'cucumber';
import '../helpers/extensions';
type TextOrWordOrContent = 'text' | 'word' | 'message' | 'content';
Then('the {word} {string} will be displayed in the output panel', async function(_textOrMessage: TextOrWordOrContent, text: string) {
    // 'waitUtilContent' [sic] — name comes from the panels interface defined elsewhere.
    await this.app.panels.waitUtilContent(text);
});
Then('the {word} {string} will be displayed in the output panel within {int} seconds', async function(_textOrMessage: TextOrWordOrContent, text: string, timeoutSeconds: number) {
    // NOTE(review): `timeoutSeconds` is passed through unconverted, whereas other timed
    // steps in this suite convert seconds to milliseconds (e.g. `timeoutSeconds * 1000`
    // in the notification steps). Confirm which unit waitUtilContent expects.
    await this.app.panels.waitUtilContent(text, timeoutSeconds);
});

Просмотреть файл

@ -0,0 +1,40 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-invalid-this
import * as assert from 'assert';
import { expect } from 'chai';
import { Then } from 'cucumber';
import { CucumberRetryMax5Seconds } from '../constants';
// Wait for some time as it can take at least 1s for problems to appear.
// Surely problems won't take more than 5 seconds to appear.
// Why 5? Well, needs to be > 1, but most certainly not more than 5.
Then('there are no problems in the problems panel', CucumberRetryMax5Seconds, async function() {
    const count = await this.app.problems.getProblemCount();
    assert.equal(count, 0);
});
Then('there is at least one problem in the problems panel', CucumberRetryMax5Seconds, async function() {
    const count = await this.app.problems.getProblemCount();
    expect(count).to.greaterThan(0);
});
Then('there are at least {int} problems in the problems panel', CucumberRetryMax5Seconds, async function(expectedMinimumCount: number) {
    // greaterThan(n - 1) expresses "at least n".
    const count = await this.app.problems.getProblemCount();
    expect(count).to.greaterThan(expectedMinimumCount - 1);
});
// Case-insensitive substring match against the joined problem messages.
Then('there is a problem with the message {string}', CucumberRetryMax5Seconds, async function(message: string) {
    const messages = await this.app.problems.getProblemMessages();
    expect(messages.join(', ').toLowerCase()).to.include(message.toLowerCase());
});
// Case-insensitive substring match against the joined problem file names.
Then('there is a problem with the file named {string}', CucumberRetryMax5Seconds, async function(fileName: string) {
    const messages = await this.app.problems.getProblemFiles();
    expect(messages.join(', ').toLowerCase()).to.include(fileName.toLowerCase());
});

Просмотреть файл

@ -0,0 +1,62 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-invalid-this
import * as assert from 'assert';
import { Given, Then, When } from 'cucumber';
import { ConfigurationTarget } from '../types';
// Map the step's setting scope to a ConfigurationTarget.
// Note: both 'workspace' and 'workspaceFolder' map to WorkspaceFolder here.
const translateType = (type: SettingType) => (type === 'user' ? ConfigurationTarget.Global : ConfigurationTarget.WorkspaceFolder);
type SettingType = 'user' | 'workspaceFolder' | 'workspace';
type EnabledOrDisabled = 'enabled' | 'disabled';
type EnabledOrDisabledOrRemove = 'enable' | 'disable' | 'remove';
Given('the {word} setting {string} is {word}', async function(type: SettingType, setting: string, enabledOrDisabled: EnabledOrDisabled) {
    await this.app.settings.updateSetting(setting, enabledOrDisabled === 'enabled', translateType(type));
});
Given('the {word} setting {string} has the value {string}', async function(type: SettingType, setting: string, value: string) {
    await this.app.settings.updateSetting(setting, value, translateType(type));
});
Given('the {word} setting {string} has the value {int}', async function(type: SettingType, setting: string, value: number) {
    await this.app.settings.updateSetting(setting, value, translateType(type));
});
// Updating a setting to `undefined` removes it from the settings file.
Given('the {word} setting {string} does not exist', async function(type: SettingType, setting: string) {
    await this.app.settings.updateSetting(setting, void 0, translateType(type));
});
When('I {word} the {word} setting {string}', async function(change: EnabledOrDisabledOrRemove, type: SettingType, setting: string) {
    // 'remove' => undefined (deletes the setting); 'enable' => true; 'disable' => false.
    const newValue = change === 'remove' ? void 0 : change === 'enable';
    await this.app.settings.updateSetting(setting, newValue, translateType(type));
});
When('I update the {word} setting {string} with the value {string}', async function(type: SettingType, setting: string, value: string) {
    await this.app.settings.updateSetting(setting, value, translateType(type));
});
When('I update the {word} setting {string} with the value {int}', async function(type: SettingType, setting: string, value: number) {
    await this.app.settings.updateSetting(setting, value, translateType(type));
});
Then('the {word} setting {string} will be {word}', async function(type: SettingType, setting: string, enabledOrDisabled: EnabledOrDisabled) {
    const value = await this.app.settings.getSetting<boolean>(setting, translateType(type));
    assert.equal(value, enabledOrDisabled === 'enabled');
});
Then('the workspace setting {string} does not exist', async function(setting: string) {
    const value = await this.app.settings.getSetting<boolean>(setting, ConfigurationTarget.WorkspaceFolder);
    assert.equal(value, undefined);
});
Then('the workspace setting {string} has the value {string}', async function(setting: string, expectedValue: string) {
    const value = await this.app.settings.getSetting<string>(setting, ConfigurationTarget.WorkspaceFolder);
    assert.equal(value, expectedValue);
});
Then('the workspace setting {string} has the value {int}', async function(setting: string, expectedValue: number) {
    const value = await this.app.settings.getSetting<number>(setting, ConfigurationTarget.WorkspaceFolder);
    assert.equal(value, expectedValue);
});
// Assert that the workspace setting's (string) value contains the given substring.
Then('the workspace setting {string} contains the value {string}', async function(setting: string, expectedValue: string) {
    const value = await this.app.settings.getSetting<string>(setting, ConfigurationTarget.WorkspaceFolder);
    assert.notEqual(value, undefined);
    // Bug fix: the original `assert.equal(value!.indexOf(expectedValue) >= 0, expectedValue)`
    // compared a boolean against the expected substring, so the assertion failed even when
    // the substring was present. Assert the containment check itself, with a clear message.
    assert.ok(value!.indexOf(expectedValue) >= 0, `Setting value '${value}' does not contain '${expectedValue}'`);
});

Просмотреть файл

@ -0,0 +1,27 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-invalid-this
import { expect } from 'chai';
import { Then } from 'cucumber';
import { CucumberRetryMax5Seconds } from '../constants';
import '../helpers/extensions';
// Add a delay, as this can take around 1s (from the time something was selected).
// NOTE(review): the step text 'the python the status bar' reads oddly, but it must match
// the wording in the .feature files — do not change the string here alone.
// Add a delay (retry), as this can take around 1s (from the time something was selected).
Then('the python the status bar contains the text {string}', CucumberRetryMax5Seconds, async function(text: string) {
    const statusBarText = await this.app.statusbar.getPythonStatusBarText();
    expect(statusBarText).to.contain(text);
});
// Add a delay (retry), as this can take around 1s (from the time something was selected).
Then('the python the status bar does not contain the text {string}', CucumberRetryMax5Seconds, async function(text: string) {
    const statusBarText = await this.app.statusbar.getPythonStatusBarText();
    expect(statusBarText).to.not.contain(text);
});
// Then('the python the status bar is not visible', async () => {
// await context.app.workbench.statusbar.pythonStatusBarElementIsNotVisible();
// });

Просмотреть файл

@ -0,0 +1,212 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
// tslint:disable: no-invalid-this
import { expect } from 'chai';
import { Then, When } from 'cucumber';
import { CucumberRetryMax5Seconds } from '../constants';
import { IApplication, TestExplorerNodeStatus } from '../types';
Then('the test explorer icon will be visible', async function() {
    await this.app.testExplorer.waitUntilIconVisible(5_000);
});
// Surely tests can't take more than 30s to get discovered.
When('I wait for test discovery to complete', async function() {
    await this.app.testExplorer.waitUntilTestsStop(30_000);
});
// Surely python tests (in our UI Tests) can't take more than 30s to run.
When('I wait for tests to complete running', async function() {
    await this.app.testExplorer.waitUntilTestsStop(30_000);
});
Then('there are {int} nodes in the test explorer', CucumberRetryMax5Seconds, async function(expectedCount: number) {
    const count = await this.app.testExplorer.getNodeCount();
    expect(count).to.equal(expectedCount);
});
Then('all of the test tree nodes have a progress icon', CucumberRetryMax5Seconds, async function() {
    // Every node must be in 'Progress' state: count of matching nodes equals the total.
    const elements = await this.app.testExplorer.getNodes();
    const progressCount = elements.filter(node => node.status === 'Progress').length;
    expect(progressCount).to.equal(elements.length);
});
/** Count the test-explorer tree nodes currently showing the given status icon. */
async function getNumberOfNodesWithIcon(app: IApplication, status: TestExplorerNodeStatus): Promise<number> {
    let matching = 0;
    for (const node of await app.testExplorer.getNodes()) {
        if (node.status === status) {
            matching += 1;
        }
    }
    return matching;
}
Then('{int} nodes in the test explorer have a status of "{word}"', CucumberRetryMax5Seconds, async function(count: number, status: TestExplorerNodeStatus) {
    const nodeCount = await getNumberOfNodesWithIcon(this.app, status);
    expect(nodeCount).to.equal(count);
});
// Singular variant of the step above (reads better in feature files).
Then('1 node in the test explorer has a status of "{word}"', CucumberRetryMax5Seconds, async function(status: TestExplorerNodeStatus) {
    const nodeCount = await getNumberOfNodesWithIcon(this.app, status);
    expect(nodeCount).to.equal(1);
});
Then('the node {string} in the test explorer has a status of "{word}"', CucumberRetryMax5Seconds, async function(label: string, status: TestExplorerNodeStatus) {
    const node = await this.app.testExplorer.getNode(label);
    expect(node.status).to.equal(status);
});
Then('the stop icon is visible in the toolbar', async function() {
    await this.app.testExplorer.waitUntilToolbarIconVisible('Stop');
});
Then('the run failed tests icon is visible in the toolbar', async function() {
    await this.app.testExplorer.waitUntilToolbarIconVisible('RunFailedTests');
});
// NOTE(review): 'I stop discovering tests' is an action — `When` would read better than `Then`.
Then('I stop discovering tests', async function() {
    await this.app.testExplorer.clickToolbarIcon('Stop');
});
When('I stop running tests', async function() {
    await this.app.testExplorer.clickToolbarIcon('Stop');
});
When('I run failed tests', async function() {
    await this.app.testExplorer.clickToolbarIcon('RunFailedTests');
});
// NOTE(review): this step asserts the stop icon is NOT visible, yet it waits until the
// icon IS visible — the exact same call as the positive step above. This looks like a
// bug; confirm whether a "wait until hidden"-style API should be used here instead.
Then('the stop icon is not visible in the toolbar', async function() {
    await this.app.testExplorer.waitUntilToolbarIconVisible('Stop');
});
When('I click the test node with the label {string}', async function(label: string) {
    await this.app.testExplorer.clickNode(label);
});
// The 'open' inline action navigates to the source code of the test node.
When('I navigate to the code associated with the test node {string}', async function(label: string) {
    await this.app.testExplorer.selectActionForNode(label, 'open');
});
// tslint:disable: no-invalid-this no-any restrict-plus-operands no-console
When('I debug the node {string} from the test explorer', async function(label: string) {
    await this.app.testExplorer.selectActionForNode(label, 'debug');
});
When('I run the node {string} from the test explorer', async function(label: string) {
    await this.app.testExplorer.selectActionForNode(label, 'run');
});
When('I expand all of the nodes in the test explorer', async function() {
    await this.app.testExplorer.expandNodes();
});
// Given('the test framework is {word}', async function (testFramework: string) {
// await updateSetting('python.unitTest.nosetestsEnabled', testFramework === 'nose', context.app.workspacePathOrFolder);
// await updateSetting('python.unitTest.pyTestEnabled', testFramework === 'pytest', context.app.workspacePathOrFolder);
// await updateSetting('python.unitTest.unittestEnabled', testFramework === 'unittest', context.app.workspacePathOrFolder);
// });
// Then('wait for the test icon to appear within {int} seconds', async function (timeout: number) {
// const icon = '.part.activitybar.left .composite-bar li a[title="Test"]';
// await context.app.code.waitForElement(icon, undefined, timeout * 1000 / 250, 250);
// await sleep(250);
// });
// Then('wait for the toolbar button with the text {string} to appear within {int} seconds', async function (title: string, timeout: number) {
// const button = `div[id = "workbench.parts.sidebar"] a[title = "${title}"]`;
// await context.app.code.waitForElement(button, undefined, timeout * 1000 / 250, 250);
// await sleep(1000);
// Then('the toolbar button with the text {string} is visible', async function (title: string) {
// });
// await context.app.code.waitForElement(`div[id = "workbench.parts.sidebar"] a[title = "${title}"]`);
// });
// Then('the toolbar button with the text {string} is not visible', async function (title: string) {
// const eles = await context.app.code.waitForElements('div[id="workbench.parts.sidebar"] ul[aria-label="PYTHON actions"] li a', true);
// assert.equal(eles.find(ele => ele.attributes['title'] === title), undefined);
// });
// Then('select first node', async function () {
// // await context.app.code.waitAndClick('div[id="workbench.view.extension.test"] div.has-children:nth-child(1) a.label-name:nth-child(1n)');
// await context.app.code.waitAndClick('div[id="workbench.view.extension.test"] div.monaco-tree-row:nth-child(1) a.label-name:nth-child(1n)');
// });
// Then('select second node', async function () {
// // await context.app.code.waitAndClick('div[id="workbench.view.extension.test"] div.has-children:nth-child(2) a.label-name:nth-child(1n)');
// await context.app.code.waitAndClick('div[id="workbench.view.extension.test"] div.monaco-tree-row:nth-child(2) a.label-name:nth-child(1n)');
// });
// Then('has {int} error test items', async function (count: number) {
// const eles = await context.app.code.waitForElements('div[id="workbench.view.extension.test"] div.custom-view-tree-node-item-icon[style^="background-image:"][style*="status-error.svg"]', true);
// assert.equal(eles.length, count);
// });
// Then('there are at least {int} error test items', async function (count: number) {
// const eles = await context.app.code.waitForElements('div[id="workbench.view.extension.test"] div.custom-view-tree-node-item-icon[style^="background-image:"][style*="status-error.svg"]', true);
// expect(eles).to.be.lengthOf.greaterThan(count - 1);
// });
// Then('there are at least {int} error test items', async function (count: number) {
// const eles = await context.app.code.waitForElements('div[id="workbench.view.extension.test"] div.custom-view-tree-node-item-icon[style^="background-image:"][style*="status-error.svg"]', true);
// expect(eles).to.be.lengthOf.greaterThan(count - 1);
// });
// Then('there are {int} success test items', async function (count: number) {
// const eles = await context.app.code.waitForElements('div[id="workbench.view.extension.test"] div.custom-view-tree-node-item-icon[style^="background-image:"][style*="status-ok.svg"]', true);
// assert.equal(eles.length, count);
// });
// Then('there are {int} running test items', async function (count: number) {
// const eles = await context.app.code.waitForElements('div[id="workbench.view.extension.test"] div.custom-view-tree-node-item-icon[style^="background-image:"][style*="discovering-tests.svg"]', true);
// assert.equal(eles.length, count);
// });
// Then('there are at least {int} running test items', async function (count: number) {
// const eles = await context.app.code.waitForElements('div[id="workbench.view.extension.test"] div.custom-view-tree-node-item-icon[style^="background-image:"][style*="discovering-tests.svg"]', true);
// expect(eles).to.be.lengthOf.greaterThan(count - 1);
// });
// When('I select test tree node number {int} and press run', async function (nodeNumber: number) {
// await highlightNode(nodeNumber);
// const selector = `div.monaco - tree - row: nth - child(${ nodeNumber }) div.monaco - icon - label.custom - view - tree - node - item - resourceLabel > div.actions > div > ul a[title = "Run"]`;
// await context.app.code.waitAndClick(selector);
// });
// When('I select test tree node number {int} and press open', async function (nodeNumber: number) {
// await highlightNode(nodeNumber);
// const selector = `div.monaco - tree - row: nth - child(${ nodeNumber }) div.monaco - icon - label.custom - view - tree - node - item - resourceLabel a[title = "Open"]`;
// await context.app.code.waitAndClick(selector);
// });
// When('I select test tree node number {int} and press debug', async function (nodeNumber: number) {
// await highlightNode(nodeNumber);
// const selector = `div.monaco - tree - row: nth - child(${ nodeNumber }) div.monaco - icon - label.custom - view - tree - node - item - resourceLabel a[title = "Debug"]`;
// await context.app.code.waitAndClick(selector);
// });
// When('I select test tree node number {int}', async function (nodeNumber: number) {
// await highlightNode(nodeNumber);
// await context.app.code.waitAndClick(`div[id = "workbench.view.extension.test"] div.monaco - tree - row: nth - child(${ nodeNumber }) a.label - name: nth - child(1n)`);
// });
// When('I stop the tests', async function () {
// const selector = 'div[id="workbench.parts.sidebar"] a[title="Stop"]';
// await context.app.code.waitAndClick(selector);
// });
// Then('stop the tests', async function () {
// await stopRunningTests();
// });
// export async function killRunningTests() {
// try {
// const selector = 'div[id="workbench.parts.sidebar"] a[title="Stop"]';
// await context.app.code.waitForElement(selector, undefined, 1, 100);
// } catch {
// return;
// }
// try {
// await stopRunningTests();
// } catch {
// noop();
// }
// }
// async function stopRunningTests() {
// const selector = 'div[id="workbench.parts.sidebar"] a[title="Stop"]';
// await context.app.code.waitAndClick(selector);
// }
// When('I click first code lens "Run Test"', async function () {
// const selector = 'div[id="workbench.editors.files.textFileEditor"] span.codelens-decoration:nth-child(2) a:nth-child(1)';
// const eles = await context.app.code.waitForElements(selector, true);
// expect(eles[0].textContent).to.contain('Run Test');
// await context.app.code.waitAndClick(selector);
// });
// When('I click first code lens "Debug Test"', async function () {
// const selector = 'div[id="workbench.editors.files.textFileEditor"] span.codelens-decoration:nth-child(2) a:nth-child(3)';
// const eles = await context.app.code.waitForElements(selector, true);
// expect(eles[0].textContent).to.contain('Debug Test');
// await context.app.code.waitAndClick(selector);
// });
// When('I click second code lens "Debug Test"', async function () {
// const selector = 'div[id="workbench.editors.files.textFileEditor"] span.codelens-decoration:nth-child(3) a:nth-child(3)';
// const eles = await context.app.code.waitForElements(selector, true);
// expect(eles[0].textContent).to.contain('Debug Test');
// await context.app.code.waitAndClick(selector);
// });
// When('I click second code lens "Run Test"', async function () {
// const selector = 'div[id="workbench.editors.files.textFileEditor"] span.codelens-decoration:nth-child(3) a:nth-child(1)';
// const eles = await context.app.code.waitForElements(selector, true);
// expect(eles[0].textContent).to.contain('Run Test');
// await context.app.code.waitAndClick(selector);
// });

Просмотреть файл

@ -0,0 +1,8 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
'use strict';
import { Channel } from '../types';
// Parameters passed from the cucumber CLI (world parameters) into each World instance:
// VS Code channel, test directory, verbose-logging flag and the python interpreter path.
export type WorldParameters = { channel: Channel; testDir: string; verboseLogging: boolean; pythonPath: string };

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше