# Intentionally exhaust the runtime's RAM by growing a list forever.
# NOTE(review): this looks like the well-known Colab trick of crashing the
# session with an out-of-memory error so Colab offers a higher-RAM runtime —
# confirm that is the intent; otherwise delete this cell.
# Fix: the loop body had lost its indentation in the paste (SyntaxError as written).
a = []
while True:  # idiomatic spelling of the original `while(1)`
    a.append('LOL')
Google Colab notebooks have an idle timeout of 90 minutes
and an absolute timeout of 12 hours. This means that if a user does not
interact with their Google Colab notebook for more than 90
minutes, its instance is automatically terminated. Likewise, the maximum
lifetime of any Colab instance is 12 hours.
// Keep the Colab session alive past the 90-minute idle timeout by
// periodically simulating a click on the toolbar's Connect button.
// NOTE(review): presumably meant to be pasted into the browser's DevTools
// console (it cannot run in a notebook cell) — confirm.
function ConnectButton(){
console.log("Connect pushed");
document.querySelector("#top-toolbar > colab-connect-button").shadowRoot.querySelector("#connect").click()
}
// 600000 ms = 600 s = 10 minutes, comfortably inside the 90-minute idle limit.
setInterval(ConnectButton,600000); //600 seconds
# Mount the user's Google Drive into the Colab filesystem at /content/drive;
# prompts for OAuth authorization on first run.
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
# Copy a file out of the mounted Drive into the current working directory
# (the path is quoted because it contains spaces).
! cp '/content/drive/My Drive/CRAWLLER/crawller part 2.mkv' .
# Flush any pending writes and cleanly unmount Drive once the copy is done.
drive.flush_and_unmount()
! git clone https://github.com/Jimut123/CAPTCHA
Cloning into 'CAPTCHA'... remote: Enumerating objects: 147, done. remote: Counting objects: 100% (147/147), done. remote: Compressing objects: 100% (147/147), done. remote: Total 1749 (delta 84), reused 2 (delta 0), pack-reused 1602 Receiving objects: 100% (1749/1749), 627.87 MiB | 36.80 MiB/s, done. Resolving deltas: 100% (504/504), done. Checking out files: 100% (1307/1307), done.
! ls
CAPTCHA 'crawller part 2.mkv' sample_data
# NOTE: `!cd` runs in a throwaway subshell, so it does NOT change the
# notebook's working directory — the following `ls` still lists /content.
# The bare `cd CAPTCHA` line magic used a few cells later is what works.
! cd CAPTCHA
! ls
CAPTCHA 'crawller part 2.mkv' sample_data
cd CAPTCHA
/content/CAPTCHA
! ls
2006.11373.pdf generators arm_lab_logo_with_title_small_adj_6.png JIMUT_CAPTCHA_BEAMER_SLIDE.pdf captcha.png JIMUT_CAPTCHA_REPORT.pdf CODE_OF_CONDUCT.md pyfiles colab.png README.md download_Project.ipynb
cd ..
/content
! wget "https://storage.googleapis.com/kaggle-data-sets/181273/407317/bundle/archive.zip?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=gcp-kaggle-com%40kaggle-161607.iam.gserviceaccount.com%2F20201013%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20201013T092650Z&X-Goog-Expires=259199&X-Goog-SignedHeaders=host&X-Goog-Signature=99bc4482025cc7f8f6151e1d6f26ce9fd37e5e1d43f73e114e4a8918800d87c66a88bd87351a7f75cbb9ef6d2ebcd6c380f7ad2a091d72be78006205f08b39f0b5b185808521c1251c656d8bc21d9702b95b38eb9438a29e6521fc94aed792ba9e08cbaa48e0758b3806d3e49a5d7fb091456a062580fa07781e8117e2b79266eb672d06f4d904e80920a6f9ea860a8857eecd5dba13433ae6e99155f781c198348854bb5176aff3ceee17dc5ec960c0362cc667513d864ffed591dc6f59061632c785941418174f226c78c5b1c4477f5d37d306a24bfade6487766cd5359095a6db338e2aae90c1433f30d55dcfcbc601b632355d07feac03fd18c5a59cbd73" -O brain_mri.zip
--2020-10-13 10:27:09-- https://storage.googleapis.com/kaggle-data-sets/181273/407317/bundle/archive.zip?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=gcp-kaggle-com%40kaggle-161607.iam.gserviceaccount.com%2F20201013%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20201013T092650Z&X-Goog-Expires=259199&X-Goog-SignedHeaders=host&X-Goog-Signature=99bc4482025cc7f8f6151e1d6f26ce9fd37e5e1d43f73e114e4a8918800d87c66a88bd87351a7f75cbb9ef6d2ebcd6c380f7ad2a091d72be78006205f08b39f0b5b185808521c1251c656d8bc21d9702b95b38eb9438a29e6521fc94aed792ba9e08cbaa48e0758b3806d3e49a5d7fb091456a062580fa07781e8117e2b79266eb672d06f4d904e80920a6f9ea860a8857eecd5dba13433ae6e99155f781c198348854bb5176aff3ceee17dc5ec960c0362cc667513d864ffed591dc6f59061632c785941418174f226c78c5b1c4477f5d37d306a24bfade6487766cd5359095a6db338e2aae90c1433f30d55dcfcbc601b632355d07feac03fd18c5a59cbd73 Resolving storage.googleapis.com (storage.googleapis.com)... 74.125.20.128, 74.125.142.128, 74.125.195.128, ... Connecting to storage.googleapis.com (storage.googleapis.com)|74.125.20.128|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 748584920 (714M) [application/zip] Saving to: ‘brain_mri.zip’ brain_mri.zip 100%[===================>] 713.91M 92.2MB/s in 10s 2020-10-13 10:27:19 (71.7 MB/s) - ‘brain_mri.zip’ saved [748584920/748584920]
! unzip -qq brain_mri.zip
! pip install gdown
Requirement already satisfied: gdown in /usr/local/lib/python3.6/dist-packages (3.6.4) Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from gdown) (1.15.0) Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from gdown) (4.41.1) Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from gdown) (2.23.0) Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (2020.6.20) Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (3.0.4) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (1.24.3) Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->gdown) (2.10)
# Download a large file shared on Google Drive via gdown (plain wget/curl
# cannot get past Drive's virus-scan confirmation page for big files).
import gdown
url = 'https://drive.google.com/u/0/uc?id=1vNsxAMmQzgCrA2A2ZrRrm6E8q14Y_c0D'
output = 'DOWNLOAD_FROM_DRIVE.pth' # local filename to save the download as
# Returns the output path on success; quiet=False shows a progress bar.
gdown.download(url, output, quiet=False)
Downloading... From: https://drive.google.com/u/0/uc?id=1vNsxAMmQzgCrA2A2ZrRrm6E8q14Y_c0D To: /content/DOWNLOAD_FROM_DRIVE.pth 132MB [00:00, 270MB/s]
'DOWNLOAD_FROM_DRIVE.pth'
https://stackoverflow.com/questions/48350226/methods-for-using-git-with-google-colab
# Clone a private GitHub repository by embedding credentials in the URL.
# SECURITY NOTE(review): the username:password pair becomes part of the
# remote URL recorded in the clone's .git/config, and GitHub no longer
# accepts account passwords for git-over-HTTPS — use a personal access
# token, and prefer a credential helper over URL-embedded secrets.
from getpass import getpass
import os
os.environ['USER'] = input('Enter the username of your Github account: ')
os.environ['PASSWORD'] = getpass('Enter the password of your Github account: ')
os.environ['REPOSITORY'] = input('Enter the name of the Github repository: ')
# Shell-visible "user:password" string consumed by the `git clone` below.
os.environ['GITHUB_AUTH'] = os.environ['USER'] + ':' + os.environ['PASSWORD']
!rm -rf $REPOSITORY # To remove the previous clone of the Github repository
!git clone https://$GITHUB_AUTH@github.com/$USER/$REPOSITORY.git
# Scrub the credentials from the environment afterwards.
os.environ['USER'] = os.environ['PASSWORD'] = os.environ['REPOSITORY'] = os.environ['GITHUB_AUTH'] = ""
Enter the username of your Github account: Jimut123 Enter the password of your Github account: ·········· Enter the name of the Github repository: ML2Proj Cloning into 'ML2Proj'... remote: Enumerating objects: 15, done. remote: Counting objects: 100% (15/15), done. remote: Compressing objects: 100% (12/12), done. remote: Total 15 (delta 1), reused 12 (delta 1), pack-reused 0 Unpacking objects: 100% (15/15), done.
!nvidia-smi
Tue Oct 13 10:28:29 2020 +-----------------------------------------------------------------------------+ | NVIDIA-SMI 455.23.05 Driver Version: 418.67 CUDA Version: 10.1 | |-------------------------------+----------------------+----------------------+ | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |===============================+======================+======================| | 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 | | N/A 45C P8 10W / 70W | 0MiB / 15079MiB | 0% Default | | | | ERR! | +-------------------------------+----------------------+----------------------+ +-----------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=============================================================================| | No running processes found | +-----------------------------------------------------------------------------+
printm()
# memory footprint support libraries/code
# --- GPU / RAM monitoring setup (one-shot report) ---
# Symlink nvidia-smi to a standard location — presumably so GPUtil can
# find it; confirm on current Colab images.
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
!pip install gputil
!pip install psutil
!pip install humanize
import psutil
import humanize
import os
import GPUtil as GPU
# NOTE(review): GPUtil GPU objects are a snapshot taken at getGPUs() time;
# their attributes do not refresh on later reads.
GPUs = GPU.getGPUs()
# XXX: only one GPU on Colab and isn't guaranteed
gpu = GPUs[0]
def printm():
    """Print the current free host RAM, this process's size, and GPU memory stats."""
    process = psutil.Process(os.getpid())
    print("Gen RAM Free: " + humanize.naturalsize( psutil.virtual_memory().available ), " I Proc size: " + humanize.naturalsize( process.memory_info().rss))
    # Fix: re-query the driver on every call. GPUtil GPU objects are static
    # snapshots, so the module-level `gpu` would report the values captured
    # at import time forever.
    gpu = GPU.getGPUs()[0]
    print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))

printm()
Collecting gputil Downloading https://files.pythonhosted.org/packages/ed/0e/5c61eedde9f6c87713e89d794f01e378cfd9565847d4576fa627d758c554/GPUtil-1.4.0.tar.gz Building wheels for collected packages: gputil Building wheel for gputil (setup.py) ... done Created wheel for gputil: filename=GPUtil-1.4.0-cp36-none-any.whl size=7411 sha256=6677d28561a033f8afba7fe83eb8d682779da65fd4061aceccd22fd85f49399b Stored in directory: /root/.cache/pip/wheels/3d/77/07/80562de4bb0786e5ea186911a2c831fdd0018bda69beab71fd Successfully built gputil Installing collected packages: gputil Successfully installed gputil-1.4.0 Requirement already satisfied: psutil in /usr/local/lib/python3.6/dist-packages (5.4.8) Requirement already satisfied: humanize in /usr/local/lib/python3.6/dist-packages (0.5.1) Gen RAM Free: 12.8 GB I Proc size: 123.0 MB GPU RAM Free: 15079MB | Used: 0MB | Util 0% | Total 15079MB
printm()
Gen RAM Free: 12.8 GB I Proc size: 123.0 MB GPU RAM Free: 15079MB | Used: 0MB | Util 0% | Total 15079MB
# --- GPU / RAM monitoring setup (background-thread variant) ---
# Repeats the environment prep of the earlier monitoring cell so this
# cell is self-contained when run on a fresh runtime.
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
!pip install gputil
!pip install psutil
!pip install humanize
import psutil
import humanize
import os, time
import GPUtil as GPU
# NOTE(review): GPUtil GPU objects are a snapshot taken at getGPUs() time;
# their attributes do not refresh on later reads.
GPUs = GPU.getGPUs()
# XXX: only one GPU on Colab and isn't guaranteed
gpu = GPUs[0]
def worker():
    """Periodically print host-RAM and GPU-memory usage until interrupted.

    Reads the module-level ``SHOW_GPU_USAGE_TIME`` (seconds between reports).
    A value of 0 — or the name not being defined yet, which happens when the
    monitor thread is started before the settings cell has run — disables
    reporting and returns immediately.
    """
    # Fix: `globals().get` instead of a bare read — the original raised
    # NameError (killing the monitor thread silently) when the thread was
    # started before SHOW_GPU_USAGE_TIME was assigned.
    if not globals().get('SHOW_GPU_USAGE_TIME', 0):
        return
    while True:
        process = psutil.Process(os.getpid())
        print("Gen RAM Free: " + humanize.naturalsize( psutil.virtual_memory().available ), " I Proc size: " + humanize.naturalsize( process.memory_info().rss))
        # Fix: re-query every pass — GPUtil GPU objects are static snapshots,
        # so the module-level `gpu` would never change between reports.
        gpu = GPU.getGPUs()[0]
        print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))
        time.sleep(SHOW_GPU_USAGE_TIME)
import threading
# Run the monitor off the main thread so the notebook stays usable.
# NOTE(review): this cell starts the thread before SHOW_GPU_USAGE_TIME is
# assigned (that happens in a later cell), so as written worker() hits a
# NameError in the thread as soon as it reads the setting — run the
# settings cell first, or start the thread afterwards.
t = threading.Thread(target=worker, name='Monitor')
t.start()
Requirement already satisfied: gputil in /usr/local/lib/python3.6/dist-packages (1.4.0) Requirement already satisfied: psutil in /usr/local/lib/python3.6/dist-packages (5.4.8) Requirement already satisfied: humanize in /usr/local/lib/python3.6/dist-packages (0.5.1)
SHOW_GPU_USAGE_TIME = 1 # seconds between usage reports (0 disables)
# NOTE(review): calling worker() here runs the infinite loop on the *main*
# thread and blocks the notebook until interrupted — that is what produced
# the KeyboardInterrupt traceback below. The background thread started in
# the earlier cell was presumably meant to do the reporting instead.
worker()
Gen RAM Free: 12.8 GB I Proc size: 125.0 MB GPU RAM Free: 15079MB | Used: 0MB | Util 0% | Total 15079MB Gen RAM Free: 12.8 GB I Proc size: 125.0 MB GPU RAM Free: 15079MB | Used: 0MB | Util 0% | Total 15079MB Gen RAM Free: 12.8 GB I Proc size: 125.0 MB GPU RAM Free: 15079MB | Used: 0MB | Util 0% | Total 15079MB Gen RAM Free: 12.8 GB I Proc size: 125.0 MB GPU RAM Free: 15079MB | Used: 0MB | Util 0% | Total 15079MB Gen RAM Free: 12.8 GB I Proc size: 125.0 MB GPU RAM Free: 15079MB | Used: 0MB | Util 0% | Total 15079MB Gen RAM Free: 12.8 GB I Proc size: 125.0 MB GPU RAM Free: 15079MB | Used: 0MB | Util 0% | Total 15079MB Gen RAM Free: 12.8 GB I Proc size: 125.0 MB GPU RAM Free: 15079MB | Used: 0MB | Util 0% | Total 15079MB Gen RAM Free: 12.8 GB I Proc size: 125.0 MB GPU RAM Free: 15079MB | Used: 0MB | Util 0% | Total 15079MB Gen RAM Free: 12.8 GB I Proc size: 125.0 MB GPU RAM Free: 15079MB | Used: 0MB | Util 0% | Total 15079MB
--------------------------------------------------------------------------- KeyboardInterrupt Traceback (most recent call last) <ipython-input-27-c82e034dc22c> in <module>() 1 SHOW_GPU_USAGE_TIME = 1 ----> 2 worker() <ipython-input-23-e0bf4f983098> in worker() 18 print("Gen RAM Free: " + humanize.naturalsize( psutil.virtual_memory().available ), " I Proc size: " + humanize.naturalsize( process.memory_info().rss)) 19 print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal)) ---> 20 time.sleep(SHOW_GPU_USAGE_TIME) 21 22 import threading KeyboardInterrupt: